# Container memory
docker stats nova-backend --no-stream
# System memory
free -h
# Process memory
ps aux --sort=-%mem | head -10
# Node.js heap usage
# NOTE: `node -e` starts a NEW Node process inside the container and reports
# ITS memory, not the running server's. Attach the inspector to the live
# process (or expose an admin endpoint returning process.memoryUsage()) for
# real figures.
docker exec -it nova-backend node -e "console.log(process.memoryUsage())"
# Monitor over time (no -t: watch has no TTY, so `docker exec -it` would fail)
watch -n 5 'docker exec nova-backend node -e "console.log(process.memoryUsage())"'
# Generate heap snapshot
# NOTE: as above, `node -e` snapshots a throwaway process, not the server.
# To snapshot the live app, start it with --heapsnapshot-signal=SIGUSR2 and
# then run: docker exec nova-backend kill -USR2 1
docker exec -it nova-backend node -e "
const v8 = require('v8');
const fs = require('fs');
const snapshot = v8.writeHeapSnapshot();
console.log('Snapshot written to:', snapshot);
"
# Copy snapshot for analysis
docker cp nova-backend:/app/[snapshot-file] ./
# Look for memory warnings
docker logs nova-backend 2>&1 | grep -i "memory\|heap\|gc"
# Check for large data processing
docker logs nova-backend 2>&1 | grep -i "processing\|loading"
Symptoms: Gradually increasing memory, never released
Solution:
# Immediate: Restart service
docker restart nova-backend
# Analyze heap snapshot with Chrome DevTools
# Identify leaked objects
# Fix code and deploy
# Common leak sources:
# - Event listeners not removed
# - Global variables accumulating data
# - Closures holding references
# - Caching without limits
Symptoms: Memory spikes during specific operations
Solution:
# Implement streaming for large data
# Use pagination
# Process data in chunks
# Clear data after processing
# Example: Stream large query results
const stream = pool.query(new QueryStream('SELECT * FROM large_table'));
stream.on('data', processRow);
Symptoms: Memory increases with cache size
Solution:
# Check Redis memory
docker exec -it nova-redis redis-cli INFO memory
# Set memory limits
docker exec -it nova-redis redis-cli CONFIG SET maxmemory 1gb
docker exec -it nova-redis redis-cli CONFIG SET maxmemory-policy allkeys-lru
# Implement cache eviction in application
# Set TTL on cached items
# Limit cache size
Symptoms: Memory increases with connection count
Solution:
# Check connection pool
docker exec -it nova-postgres psql -U nova -d nova_rewards -c "
SELECT count(*) FROM pg_stat_activity;"
# Reduce pool size
# In backend code:
const pool = new Pool({
max: 20, // Reduce from 50
idleTimeoutMillis: 30000
});
Symptoms: Memory spikes during API calls
Solution:
// Limit request body size
app.use(express.json({ limit: '1mb' }));
// Stream large responses
res.setHeader('Content-Type', 'application/json');
stream.pipe(res);
# Quick memory release
docker restart nova-backend
# docker-compose.yml
services:
backend:
mem_limit: 2g
mem_reservation: 1g
# Force GC — the flag must be on THIS node invocation for global.gc to exist.
# Caveat: this only GCs the one-off `node -e` process; to GC the server,
# start it with --expose-gc and call global.gc() from inside the app.
docker exec -it nova-backend node --expose-gc -e "global.gc(); console.log('GC forced')"
# Clear Redis cache (WARNING: FLUSHDB deletes ALL keys in the current DB)
docker exec -it nova-redis redis-cli FLUSHDB
# Clear application cache
curl -X POST http://backend:4000/api/admin/cache/clear
# Set Node.js old-generation heap limit (value is in MB)
node --max-old-space-size=1024 server.js
// Remove event listeners
emitter.removeListener('event', handler);
// Clear intervals/timeouts
clearInterval(intervalId);
// Close connections
connection.close();
// Stream large files
const stream = fs.createReadStream('large-file.json');
stream.pipe(res);
// Stream database results
const queryStream = new QueryStream('SELECT * FROM large_table');
// LRU cache with size limit
const LRU = require('lru-cache');
const cache = new LRU({
max: 500,
maxAge: 1000 * 60 * 60
});