Skip to content

Resource Limits Quick Reference

Resource Limits Quick Reference

Default Limits

| Resource          | Default | Max          | Timeout                |
|-------------------|---------|--------------|------------------------|
| Query Timeout     | 30s     | 5m           | Configurable per-query |
| Connections       | 10,000  | Configurable | 5m idle                |
| Connections/User  | 100     | Configurable | -                      |
| Query Memory      | 1GB     | Configurable | -                      |
| Connection Memory | 512MB   | Configurable | -                      |
| Total Memory      | 8GB     | Configurable | -                      |
| File Descriptors  | 10,000  | Configurable | 5m idle                |

Quick Start

Initialize Resource Manager

use heliosdb_storage::{ResourceLimitManager, ResourceLimitsConfig};
// Use defaults
let manager = ResourceLimitManager::default_config();
// Custom config
let config = ResourceLimitsConfig {
// ... customize limits
..Default::default()
};
let manager = ResourceLimitManager::new(config);

Execute Query with Timeout

use heliosdb_compute::{ResourceAwareExecutor, QueryContext};
let executor = ResourceAwareExecutor::new(base_executor, manager);
// With default timeout (30s)
let result = executor.execute(plan, "user1".to_string()).await?;
// With custom timeout
let context = QueryContext::new("user1".to_string())
.with_timeout(60_000);
let result = executor.execute_with_timeout(plan, context).await?;

Manage Connections

use heliosdb_storage::ConnectionPool;
let pool = ConnectionPool::new(PoolConfig::default());
// Add and acquire
let conn_id = pool.add_connection(conn, "user1".to_string()).await?;
let guard = pool.acquire("user1").await?;
// Auto-cleanup
pool.cleanup_idle().await;

Track Memory

use heliosdb_storage::{MemoryManager, AllocationType};
let manager = MemoryManager::new(limits);
// Allocate
manager.allocate("id", 512, AllocationType::Query, "owner")?;
// Check pressure
if manager.is_under_pressure() {
// Handle pressure
}
// Deallocate
manager.deallocate("id");

Manage File Descriptors

use heliosdb_storage::{FileDescriptorManager, FileHandleType};
let manager = FileDescriptorManager::new(limits);
// Acquire handle
manager.acquire_handle("id", "/path", FileHandleType::SSTable, "owner")?;
// Cleanup
manager.cleanup_idle_handles();
// Release
manager.release_handle("id")?;

Monitoring

Get Statistics

// Overall statistics
let stats = resource_manager.get_statistics();
// Memory statistics
let mem_stats = memory_manager.get_statistics();
// Pool statistics
let pool_stats = pool.stats();
// FD statistics
let fd_stats = fd_manager.get_statistics();

Pressure Levels

  • Normal: below 60% usage
  • Low: 60% to below 75% usage
  • Medium: 75% to below 85% usage (throttle new allocations)
  • High: 85% to below 95% usage (evict caches)
  • Critical: 95% usage and above (reject new allocations)

Error Handling

match result {
Err(HeliosError::ResourceLimit(msg)) => {
// Handle resource limit error
match msg.as_str() {
s if s.contains("timeout") => { /* Query timeout */ }
s if s.contains("connection") => { /* Connection limit */ }
s if s.contains("memory") => { /* Memory limit */ }
_ => { /* Other resource error */ }
}
}
Ok(data) => { /* Process data */ }
}

Configuration Patterns

Production Settings

ResourceLimitsConfig {
query: QueryConfig {
timeout_ms: 60_000, // 1 minute
max_timeout_ms: 600_000, // 10 minutes
auto_cancel: true,
..Default::default()
},
connections: ConnectionLimits {
max_connections: 10_000,
max_per_user: 200,
idle_timeout_ms: 600_000, // 10 minutes
..Default::default()
},
memory: MemoryLimits {
max_query_memory_mb: 2048, // 2GB
max_total_memory_mb: 16384, // 16GB
pressure_threshold: 0.80, // 80%
..Default::default()
},
file_descriptors: FileDescriptorLimits {
max_open_files: 50_000,
handle_timeout_ms: 600_000, // 10 minutes
auto_cleanup: true,
..Default::default()
},
..Default::default()
}

Development Settings

ResourceLimitsConfig {
query: QueryConfig {
timeout_ms: 300_000, // 5 minutes
max_timeout_ms: 3_600_000, // 1 hour
auto_cancel: false, // Debug mode
..Default::default()
},
connections: ConnectionLimits {
max_connections: 100,
max_per_user: 10,
idle_timeout_ms: 60_000, // 1 minute
..Default::default()
},
memory: MemoryLimits {
max_query_memory_mb: 512,
max_total_memory_mb: 2048,
pressure_threshold: 0.90,
pressure_detection_enabled: false, // Debug mode
..Default::default()
},
file_descriptors: FileDescriptorLimits {
max_open_files: 1_000,
auto_cleanup: true,
..Default::default()
},
..Default::default()
}

Troubleshooting

Query Timeouts

// Increase timeout for specific query
let context = QueryContext::new(user).with_timeout(120_000);
// Check current config
let config = manager.get_config();
println!("Timeout: {}ms, Max: {}ms",
config.query.timeout_ms,
config.query.max_timeout_ms);

Connection Limits

// Check current usage
let stats = manager.get_statistics();
println!("Connections: {}/{}",
stats.total_connections,
stats.max_connections);
// Cleanup idle connections
let cleaned = manager.cleanup_idle_connections();

Memory Pressure

// Check pressure
let level = memory_manager.get_pressure_level();
println!("Pressure: {:?}", level);
// Get eviction candidates
let candidates = memory_manager.suggest_evictions(1024);
for id in candidates {
memory_manager.deallocate(&id);
}

File Descriptor Exhaustion

// Check usage
let stats = fd_manager.get_statistics();
println!("Open: {}/{}", stats.open_files, stats.max_files);
// Force close LRU handles
let closed = fd_manager.force_close_lru(100);
// Cleanup by type
let cleaned = fd_manager.cleanup_by_type(FileHandleType::Temp);

Best Practices

  1. Always use ResourceAwareExecutor for query execution
  2. Monitor pressure levels and respond proactively
  3. Set appropriate timeouts based on query complexity
  4. Use connection pooling instead of direct connections
  5. Track memory allocations by type for better visibility
  6. Enable auto-cleanup in production
  7. Configure limits based on available system resources
  8. Test under load to tune limits appropriately
  9. Monitor statistics regularly
  10. Handle resource errors gracefully

Common Patterns

Retry with Backoff

// NOTE: in real code `plan` and `user` must be cloned (or borrowed) per
// attempt, since `execute` takes them by value — TODO confirm against API.
let mut attempts = 0;
let max_attempts = 3;
while attempts < max_attempts {
match executor.execute(plan, user).await {
Ok(result) => return Ok(result),
Err(HeliosError::ResourceLimit(_)) if attempts < max_attempts - 1 => {
attempts += 1;
tokio::time::sleep(Duration::from_millis(100 * attempts)).await;
}
Err(e) => return Err(e),
}
}
unreachable!("the final attempt always returns Ok or Err above");

Graceful Degradation

if memory_manager.is_under_pressure() {
let level = memory_manager.get_pressure_level();
match level {
PressureLevel::Medium => {
// Evict non-essential caches
cache.evict_cold_entries();
}
PressureLevel::High => {
// Evict all caches
cache.clear();
}
PressureLevel::Critical => {
// Reject new requests
return Err(HeliosError::ResourceLimit(
"System under critical memory pressure".into()
));
}
_ => {}
}
}

Background Cleanup

// Spawn background cleanup task
tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(60));
loop {
interval.tick().await;
// Cleanup idle connections
pool.cleanup_idle().await;
// Cleanup idle file handles
fd_manager.cleanup_idle_handles();
// Check memory pressure
if memory_manager.is_under_pressure() {
let candidates = memory_manager.suggest_evictions(1024);
for id in candidates {
memory_manager.deallocate(&id);
}
}
}
});

For detailed documentation, see docs/RESOURCE_LEAK_PREVENTION_COMPLETE.md (relative to the repository root).