@nitedani
Last active April 29, 2022 20:41
Detect memory spikes, then capture and upload CPU and heap profiles to S3.
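The gist consists of a .env template and a TypeScript module. Calling runProfilerLoop() polls the process's resident set size; once it spikes past the trigger threshold, the module records a CPU profile and a sampled heap profile until a memory or time limit is hit, then uploads both to the configured S3 bucket and re-arms itself.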
CAPTURE_AWS_REGION=
CAPTURE_AWS_ACCESS_KEY=
CAPTURE_AWS_SECRET_KEY=
CAPTURE_AWS_BUCKET=
CAPTURE_MEM_TRIGGER_MB=500
CAPTURE_MEM_LIMIT_MB=1000
CAPTURE_TIME_LIMIT_SEC=20
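These variables belong in a .env file loaded by dotenv at startup. CAPTURE_MEM_TRIGGER_MB is the resident set size (in MB) at which profiling starts, CAPTURE_MEM_LIMIT_MB is the size at which the profiles are stopped and uploaded, and CAPTURE_TIME_LIMIT_SEC caps how long a single capture may run.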
import { S3 } from 'aws-sdk';
import dotenv from 'dotenv';
import Inspector from 'inspector-api';

dotenv.config();

// Exit early if any of the required environment variables are missing.
const ensure = (variables: string[]) => {
  const undefinedVars = variables.filter(
    (variable) => !(variable in process.env),
  );
  if (undefinedVars.length) {
    console.error(
      `Missing environment variable(s): ${undefinedVars.join(', ')}`,
    );
    process.exit(1);
  }
};
ensure([
  'CAPTURE_AWS_ACCESS_KEY',
  'CAPTURE_AWS_SECRET_KEY',
  'CAPTURE_AWS_BUCKET',
  'CAPTURE_AWS_REGION',
  'CAPTURE_MEM_TRIGGER_MB',
  'CAPTURE_MEM_LIMIT_MB',
  'CAPTURE_TIME_LIMIT_SEC',
]);
// ensure() has already verified these exist, so the non-null
// assertions are safe under strict TypeScript settings.
const captureLimitMB = parseInt(process.env.CAPTURE_MEM_LIMIT_MB!, 10);
const timeLimitSeconds = parseInt(process.env.CAPTURE_TIME_LIMIT_SEC!, 10);

const inspector = new Inspector();

const s3Client = new S3({
  correctClockSkew: true,
  region: process.env.CAPTURE_AWS_REGION,
  credentials: {
    accessKeyId: process.env.CAPTURE_AWS_ACCESS_KEY!,
    secretAccessKey: process.env.CAPTURE_AWS_SECRET_KEY!,
  },
});
// Resident set size in whole megabytes.
const getMem = () => Math.round(process.memoryUsage().rss / 1024 / 1024);

// Timestamp for the uploaded file names, e.g. 2022-4-29-20-41-30.
const getDate = () => {
  const date = new Date();
  return `${date.getFullYear()}-${
    date.getMonth() + 1
  }-${date.getDate()}-${date.getHours()}-${date.getMinutes()}-${date.getSeconds()}`;
};

let detectAllocationTimeout: NodeJS.Timeout | null = null;
let detectTimeout: NodeJS.Timeout | null = null;
export const runProfilerLoop = async () => {
  await Promise.all([inspector.profiler.enable(), inspector.heap.enable()]);
  if (detectTimeout) {
    clearTimeout(detectTimeout);
    detectTimeout = null;
  }
  // Poll memory usage until it crosses the trigger threshold.
  detectTimeout = setTimeout(async () => {
    const startMB = getMem();
    if (startMB > parseInt(process.env.CAPTURE_MEM_TRIGGER_MB!, 10)) {
      console.log('Starting profiler...');
      await Promise.all([
        inspector.profiler.start(),
        inspector.heap.startSampling(),
      ]);
      const now = Date.now();
      let maxMB = startMB;
      // Keep profiling until memory passes the hard limit or the time
      // limit elapses, then stop and upload both profiles.
      const detectAllocationLoop = () => {
        if (detectAllocationTimeout) {
          clearTimeout(detectAllocationTimeout);
          detectAllocationTimeout = null;
        }
        detectAllocationTimeout = setTimeout(async () => {
          const endMB = getMem();
          if (endMB > maxMB) {
            maxMB = endMB;
          }
          const elapsedSeconds = (Date.now() - now) / 1000;
          if (endMB > captureLimitMB || elapsedSeconds > timeLimitSeconds) {
            await Promise.all([
              inspector.profiler.stop().then(async (data) => {
                console.log(
                  `Captured cpu profile, ${elapsedSeconds}s duration, ${startMB}MB start, ${endMB}MB end, ${maxMB}MB peak`,
                );
                console.log(`Uploading cpu profile to S3...`);
                const key = `${startMB}MB-${getDate()}.cpuprofile`;
                await s3Client
                  .upload({
                    Bucket: process.env.CAPTURE_AWS_BUCKET!,
                    Key: key,
                    Body: JSON.stringify(data),
                  })
                  .promise();
              }),
              inspector.heap.stopSampling().then(async (data) => {
                console.log(
                  `Captured heap profile, ${elapsedSeconds}s duration, ${startMB}MB start, ${endMB}MB end, ${maxMB}MB peak`,
                );
                console.log(`Uploading heap profile to S3...`);
                const key = `${startMB}MB-${getDate()}.heapprofile`;
                await s3Client
                  .upload({
                    Bucket: process.env.CAPTURE_AWS_BUCKET!,
                    Key: key,
                    Body: JSON.stringify(data),
                  })
                  .promise();
              }),
            ]);
            // Cool down for 10 seconds, then arm the detector again.
            await new Promise((resolve) => setTimeout(resolve, 10000));
            runProfilerLoop();
            return;
          }
          detectAllocationLoop();
        }, 1);
      };
      detectAllocationLoop();
    } else {
      runProfilerLoop();
    }
  }, 1);
};
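A minimal sketch of wiring this into an application, assuming the module above is saved as profiler.ts; the file names and entry point are assumptions for illustration, not part of the gist:

// index.ts — hypothetical entry point
import { runProfilerLoop } from './profiler';

// Arm the detector as early as possible so spikes during startup
// are also captured; the loop keeps re-arming itself after uploads.
runProfilerLoop();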