Optimize Node.js application performance with caching, clustering, profiling, and monitoring techniques
Optimize Node.js performance with caching, clustering, profiling, and monitoring techniques. Use this when your app is slow, needs to handle high traffic, or requires database query optimization and memory management.
/plugin marketplace add pluginagentmarketplace/custom-plugin-nodejs
/plugin install nodejs-developer-plugin@pluginagentmarketplace-nodejs
This skill inherits all available tools. When active, it can use any tool Claude has access to.
Bundled files: assets/config.yaml, references/GUIDE.md, scripts/helper.py
Master Node.js performance optimization for fast, scalable, and efficient backend applications.
Optimize in four areas: caching, clustering, database access, and profiling/monitoring.
const redis = require('redis');

const client = redis.createClient({ url: process.env.REDIS_URL });
client.connect().catch(console.error); // node-redis v4+ requires an explicit connect

// Cache middleware
async function cacheMiddleware(req, res, next) {
  const key = `cache:${req.originalUrl}`;
  const cached = await client.get(key);
  if (cached) {
    return res.json(JSON.parse(cached));
  }

  // Override res.json to cache the response before sending it
  const originalJson = res.json.bind(res);
  res.json = (data) => {
    client.setEx(key, 3600, JSON.stringify(data)); // 1 hour TTL
    return originalJson(data);
  };
  next();
}

// Usage
app.get('/api/users', cacheMiddleware, getUsers);
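Cached responses go stale after writes. A minimal invalidation sketch, assuming the same client and the cache:-prefixed key scheme above; createUser is a hypothetical write helper:

// Invalidate the cached list whenever the underlying data changes
app.post('/api/users', async (req, res) => {
  const user = await createUser(req.body);  // hypothetical write helper
  await client.del('cache:/api/users');     // drop the stale cached GET response
  res.status(201).json(user);
});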
const cluster = require('cluster');
const os = require('os');

if (cluster.isMaster) {
  const numCPUs = os.cpus().length;
  console.log(`Master ${process.pid} starting ${numCPUs} workers`);

  for (let i = 0; i < numCPUs; i++) {
    cluster.fork();
  }

  cluster.on('exit', (worker) => {
    console.log(`Worker ${worker.process.pid} died, restarting...`);
    cluster.fork();
  });
} else {
  // Worker process - start Express server
  const app = require('./app');
  app.listen(3000, () => {
    console.log(`Worker ${process.pid} started`);
  });
}
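For clean deploys, the primary can drain workers instead of killing them mid-request. A sketch under the same cluster setup as above:

// Graceful shutdown: stop auto-restart, then let in-flight requests finish
if (cluster.isMaster) {
  process.on('SIGTERM', () => {
    console.log('SIGTERM received, disconnecting workers...');
    cluster.removeAllListeners('exit');        // don't re-fork workers we are draining
    cluster.disconnect(() => process.exit(0)); // resolves once all workers have closed
  });
}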
// PostgreSQL pool
const { Pool } = require('pg');
const pool = new Pool({
  connectionString: process.env.DATABASE_URL,
  max: 20,                 // Max connections
  min: 5,                  // Min connections kept open
  idleTimeoutMillis: 30000 // Close idle connections after 30s
});

// MongoDB with Mongoose
const mongoose = require('mongoose');
mongoose.connect(uri, {
  maxPoolSize: 10,
  minPoolSize: 5
});
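Route queries through the PostgreSQL pool above so connections are checked out and returned automatically; a sketch with parameterized SQL (table and column names are hypothetical):

// pool.query acquires a connection, runs the query, and releases it
async function getUserByEmail(email) {
  const { rows } = await pool.query(
    'SELECT id, email FROM users WHERE email = $1', // $1 placeholder avoids SQL injection
    [email]
  );
  return rows[0];
}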
// ❌ Bad: N+1 query problem
const users = await User.find();
for (const user of users) {
  user.posts = await Post.find({ userId: user.id }); // N extra queries
}

// ✅ Good: populate related documents
const users = await User.find().populate('posts'); // 2 queries total instead of N+1

// Add indexes
userSchema.index({ email: 1 }, { unique: true });
userSchema.index({ createdAt: -1 });

// Use lean() for read-only queries (faster)
const users = await User.find().lean(); // Returns plain objects instead of Mongoose documents
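Fetching only the fields a handler needs cuts payload size and memory further; a sketch using Mongoose field selection on the same User model (the active and name fields are assumed to exist):

// Project only the needed fields and return plain objects
const activeUsers = await User.find({ active: true })
  .select('name email createdAt') // skip large/unused fields
  .lean();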
async function getUsers(req, res) {
  const page = parseInt(req.query.page) || 1;
  const limit = parseInt(req.query.limit) || 10;
  const skip = (page - 1) * limit;

  const [users, total] = await Promise.all([
    User.find().limit(limit).skip(skip),
    User.countDocuments()
  ]);

  res.json({
    data: users,
    pagination: {
      page,
      limit,
      total,
      pages: Math.ceil(total / limit)
    }
  });
}
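On large collections skip() itself gets slow, since MongoDB still walks the skipped documents. A cursor-based sketch that pages on the indexed createdAt field (the cursor query parameter is a hypothetical name):

// Cursor pagination: filter past the last seen createdAt instead of skipping rows
async function getUsersByCursor(req, res) {
  const limit = parseInt(req.query.limit) || 10;
  const cursor = req.query.cursor; // createdAt of the last item on the previous page
  const query = cursor ? { createdAt: { $lt: new Date(cursor) } } : {};

  const users = await User.find(query).sort({ createdAt: -1 }).limit(limit).lean();
  const nextCursor = users.length ? users[users.length - 1].createdAt : null;

  res.json({ data: users, nextCursor });
}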
const compression = require('compression');
app.use(compression({
  level: 6,
  threshold: 1024 // Only compress responses > 1KB
}));

app.get('/api/static-data', (req, res) => {
  res.set('Cache-Control', 'public, max-age=3600'); // Cache 1 hour
  res.json(data);
});

// For frequently changing data
res.set('Cache-Control', 'public, max-age=60'); // Cache 1 minute
// ❌ Sequential (300ms total)
const users = await getUsers();       // 100ms
const posts = await getPosts();       // 100ms
const comments = await getComments(); // 100ms

// ✅ Parallel (~100ms total)
const [users, posts, comments] = await Promise.all([
  getUsers(),
  getPosts(),
  getComments()
]);
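Promise.all rejects as soon as any call fails; when partial results are acceptable, Promise.allSettled lets the others finish (same three calls as above):

// allSettled never rejects; check each result's status instead
const results = await Promise.allSettled([getUsers(), getPosts(), getComments()]);
const [users, posts, comments] = results.map((r) =>
  r.status === 'fulfilled' ? r.value : [] // fall back to an empty list on failure
);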
const fs = require('fs');

// ❌ Bad: Load entire file into memory
app.get('/large-file', async (req, res) => {
  const data = await fs.promises.readFile('large.txt');
  res.send(data);
});

// ✅ Good: Stream the file
app.get('/large-file', (req, res) => {
  const stream = fs.createReadStream('large.txt');
  stream.pipe(res);
});
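pipe() does not forward source errors to the response, and an unhandled stream error can crash the process; stream.pipeline wires up error handling and cleanup:

const { pipeline } = require('stream');

app.get('/large-file', (req, res) => {
  pipeline(fs.createReadStream('large.txt'), res, (err) => {
    if (err && !res.headersSent) res.status(500).end('Stream error');
  });
});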
# Start with profiler
node --prof app.js
# Generate readable output
node --prof-process isolate-0x*.log > profile.txt
const { performance } = require('perf_hooks');
const start = performance.now();
await heavyOperation();
const end = performance.now();
console.log(`Operation took ${end - start}ms`);
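For recurring measurements, perf_hooks marks plus an observer avoid scattering manual timers (the mark and measure names are arbitrary; heavyOperation is the same placeholder as above):

const { performance, PerformanceObserver } = require('perf_hooks');

const obs = new PerformanceObserver((list) => {
  for (const entry of list.getEntries()) {
    console.log(`${entry.name}: ${entry.duration.toFixed(1)}ms`);
  }
});
obs.observe({ entryTypes: ['measure'] });

performance.mark('op-start');
await heavyOperation();
performance.mark('op-end');
performance.measure('heavy-operation', 'op-start', 'op-end');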
// ❌ Bad: Memory leak - unbounded in-process cache
const cache = {};
app.get('/data/:id', (req, res) => {
  cache[req.params.id] = data; // Never cleaned up
  res.json(data);
});

// ✅ Good: Use an LRU cache with limits
const LRU = require('lru-cache');
const cache = new LRU({
  max: 500,               // Max 500 items
  maxAge: 1000 * 60 * 60  // TTL: 1 hour (renamed to `ttl` in lru-cache v7+)
});
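Wiring the bounded cache into the earlier route keeps memory flat; fetchData below is a hypothetical loader:

app.get('/data/:id', async (req, res) => {
  let data = cache.get(req.params.id);
  if (!data) {
    data = await fetchData(req.params.id); // hypothetical data loader
    cache.set(req.params.id, data);        // old entries are evicted past `max`
  }
  res.json(data);
});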
const used = process.memoryUsage();
console.log({
  rss: `${Math.round(used.rss / 1024 / 1024)}MB`,
  heapTotal: `${Math.round(used.heapTotal / 1024 / 1024)}MB`,
  heapUsed: `${Math.round(used.heapUsed / 1024 / 1024)}MB`
});
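Sampling memory periodically makes leak trends visible, and a heap snapshot can be pulled on demand for Chrome DevTools; a sketch using the built-in v8 module (the 30 s interval and SIGUSR2 trigger are arbitrary choices):

const v8 = require('v8');

setInterval(() => {
  const { heapUsed } = process.memoryUsage();
  console.log(`heapUsed: ${Math.round(heapUsed / 1024 / 1024)}MB`);
}, 30000).unref(); // unref() so the timer doesn't keep the process alive

process.on('SIGUSR2', () => {
  console.log('Heap snapshot written to', v8.writeHeapSnapshot());
});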
const winston = require('winston');

const logger = winston.createLogger({
  level: 'info',
  format: winston.format.json(),
  transports: [
    new winston.transports.File({ filename: 'error.log', level: 'error' }),
    new winston.transports.File({ filename: 'combined.log' })
  ]
});

// Request logging
app.use((req, res, next) => {
  const start = Date.now();
  res.on('finish', () => {
    logger.info('HTTP Request', {
      method: req.method,
      url: req.url,
      status: res.statusCode,
      duration: Date.now() - start
    });
  });
  next();
});
# load-test.yml
config:
  target: 'http://localhost:3000'
  phases:
    - duration: 60
      arrivalRate: 10
scenarios:
  - name: "Get users"
    flow:
      - get:
          url: "/api/users"

artillery run load-test.yml
Set NODE_ENV=production and apply production-only middleware:

// production.js
if (process.env.NODE_ENV === 'production') {
  // Trust proxy (for load balancer)
  app.set('trust proxy', 1);

  // Disable x-powered-by header
  app.disable('x-powered-by');

  // Enable compression
  app.use(compression());

  // Use production logger
  app.use(productionLogger());
}
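In production, also shut the HTTP server down cleanly on SIGTERM so a load balancer or orchestrator can drain in-flight requests; a sketch assuming the server handle is kept from app.listen:

const server = app.listen(process.env.PORT || 3000);

process.on('SIGTERM', () => {
  console.log('SIGTERM received, closing server...');
  server.close(() => {
    // stops accepting new connections; exits once in-flight requests finish
    process.exit(0);
  });
});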
Optimize performance when:
- Response times are slow or degrade under load
- The app needs to handle high traffic
- Database queries need optimization
- Memory usage keeps growing or the app leaks memory
This skill should be used when the user asks to "create a slash command", "add a command", "write a custom command", "define command arguments", "use command frontmatter", "organize commands", "create command with file references", "interactive command", "use AskUserQuestion in command", or needs guidance on slash command structure, YAML frontmatter fields, dynamic arguments, bash execution in commands, user interaction patterns, or command development best practices for Claude Code.
This skill should be used when the user asks to "create an agent", "add an agent", "write a subagent", "agent frontmatter", "when to use description", "agent examples", "agent tools", "agent colors", "autonomous agent", or needs guidance on agent structure, system prompts, triggering conditions, or agent development best practices for Claude Code plugins.
This skill should be used when the user asks to "create a hook", "add a PreToolUse/PostToolUse/Stop hook", "validate tool use", "implement prompt-based hooks", "use ${CLAUDE_PLUGIN_ROOT}", "set up event-driven automation", "block dangerous commands", or mentions hook events (PreToolUse, PostToolUse, Stop, SubagentStop, SessionStart, SessionEnd, UserPromptSubmit, PreCompact, Notification). Provides comprehensive guidance for creating and implementing Claude Code plugin hooks with focus on advanced prompt-based hooks API.