@paulirish
Last active April 2, 2024 16:50
Evaluating overhead of performance.mark()

A few conversations have circled around user-side structural profiling. For context, see React PR #7549, "Show React events in the timeline when ReactPerf is active".

One particular concern is the measurement overhead. This gist contains a benchmarking script (measure.js) for evaluating that overhead, along with initial results.
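For reference, the instrumentation pattern under discussion looks roughly like this. It's a minimal sketch; renderComponent() and the mark names are hypothetical stand-ins for whatever work you're profiling:

// Bracket a unit of work with paired marks, then tie them together with a measure.
performance.mark('render-start');
renderComponent(); // hypothetical work being profiled
performance.mark('render-end');
performance.measure('render', 'render-start', 'render-end');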

Results: performance.mark()

mark() runs at about 0.65µs per call. Naturally, that works out to roughly 1ms of overhead for 1,500 mark()s (1,500 × 0.65µs ≈ 0.98ms).

Results: performance.measure()

Calls to measure() are not significantly more expensive than calls to mark(). They appear to be ~30% more expensive than a mark(), but still clock in at only ~0.85µs per measure() call.

(If benchmarking, be sure to run the Timeline against your benchmark; I've seen large DOM GCs when calling measure() more than 50,000 times a second. :)
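If you do want measure() entirely out of the hot path (as the commented-out loop in measure.js below hints at), one option is to record only marks while the work runs and create the measures afterwards. A rough sketch, with hypothetical mark names; requestIdleCallback is just one way to defer:

performance.mark('task-0-start');
doSomeWork();
performance.mark('task-0-end');
// ...later, off the critical path:
requestIdleCallback(function() {
  performance.measure('task-0', 'task-0-start', 'task-0-end');
});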

Results: console.time()

console.time() is much more expensive at ~11.5µs/call (that's ~18x slower than perf.mark()), so it's not recommended. It also spams your console, which makes it inconvenient for high-volume work.

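If the appeal of console.time() is getting numbers printed, note that marks and measures can be read back from the performance entry buffer after the fact (and they show up in the DevTools Timeline anyway). A small sketch, assuming a 'render' measure like the one in the sketch above:

// Read a recorded measure back instead of logging from inside the hot path.
var entries = performance.getEntriesByName('render', 'measure');
entries.forEach(function(e) {
  console.log(e.name + ': ' + e.duration.toFixed(3) + 'ms');
});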

measure.js

console.clear();

var instrumentedTaskRuns = 10000;
var runscount = 20;
var overheadresults = [];
var isMeasuring = false;
var marksPerTask = 2 + (isMeasuring ? 1 : 0);

// Run the instrumented and uninstrumented loops back to back, then report the
// estimated per-call overhead of the instrumentation.
function doRuns() {
  if (runscount <= 0) return console.log('done');

  // can change this to instrumentedConsoleTimeRun
  var instrumentedRun = instrumentedPerfMarkRun;
  var instrumentingtotal = instrumentedRun();
  var notinstrumentingtotal = unInstrumentedRun();

  var overheadpertask = (instrumentingtotal - notinstrumentingtotal) / (instrumentedTaskRuns * marksPerTask);
  var overheadMicro = Number((overheadpertask * 1000).toFixed(3));
  var baselineStr = ` Baseline work cost per run: ${notinstrumentingtotal.toFixed(2)}ms`;
  console.log(`Overhead of ${instrumentedTaskRuns.toLocaleString()} ${instrumentedRun.callname} calls:`, overheadMicro, 'µs/call.', baselineStr);

  overheadresults.push(overheadpertask);
  runscount--;

  // doRuns();
  setTimeout(doRuns); // yield evt loop
}

// no instrumentation
function unInstrumentedRun() {
  var overallstart = performance.now();
  for (var i = 0; i < instrumentedTaskRuns; i += 2) {
    doSomeWork();
  }
  var total = performance.now() - overallstart;
  return total;
}

// using performance.mark();
function instrumentedPerfMarkRun() {
  var overallstart = performance.now();
  performance.mark('overall');
  for (var i = 0; i < instrumentedTaskRuns; i += 2) {
    performance.mark(i);
    doSomeWork();
    performance.mark(i + 1);
    isMeasuring && performance.measure(i, i, i + 1);
  }
  performance.mark('overallEnd');
  var total = performance.now() - overallstart;
  // these measure calls are not included in cost, as they can be done outside of the critical path.
  // for (var i = 0; i < instrumentedTaskRuns; i += 2) {
  //   performance.measure(i, i, i + 1);
  // }
  performance.measure('overall', 'overall', 'overallEnd');
  return total;
}
instrumentedPerfMarkRun.callname = 'perf.mark()';

// using console.time();
function instrumentedConsoleTimeRun() {
  var overallstart = performance.now();
  console.time('overall');
  for (var i = 0; i < instrumentedTaskRuns; i += 2) {
    console.time(i);
    doSomeWork();
    console.timeEnd(i);
  }
  console.timeEnd('overall');
  var total = performance.now() - overallstart;
  return total;
}
instrumentedConsoleTimeRun.callname = 'console.time()';

// just some arbitrary work for the VM
function doSomeWork() {
  var i = 1000;
  var s = 0;
  while (i--)
    s += i * i * i;
  return s;
}

// doRuns();
setTimeout(_ => { doRuns(); }, 0);

// stats helpers for summarizing overheadresults (call them manually from the console)
function standardDeviation(values) {
  var avg = average(values);
  var squareDiffs = values.map(function(value) {
    var diff = value - avg;
    var sqrDiff = diff * diff;
    return sqrDiff;
  });
  var avgSquareDiff = average(squareDiffs);
  var stdDev = Math.sqrt(avgSquareDiff);
  return stdDev;
}

function average(data) {
  var sum = data.reduce(function(sum, value) {
    return sum + value;
  }, 0);
  var avg = sum / data.length;
  return avg;
}

function median(values) {
  values.sort(function(a, b) { return a - b; });
  var half = Math.floor(values.length / 2);
  if (values.length % 2)
    return values[half];
  else
    return (values[half - 1] + values[half]) / 2.0;
}

// 90th percentile
function ninetyth(values) {
  values.sort(function(a, b) { return a - b; });
  var half = Math.floor(values.length * .9);
  if (values.length % 2)
    return values[half];
  else
    return (values[half - 1] + values[half]) / 2.0;
}
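One follow-up note: the script above never clears its entries, and marks/measures accumulate in the performance entry buffer. For a long-running benchmark (or a real app doing high-volume instrumentation) it's worth clearing them once they've been consumed; a minimal sketch:

// Drop recorded entries so the buffer doesn't grow unbounded.
performance.clearMarks();
performance.clearMeasures();
// Or clear a specific entry by name:
performance.clearMarks('render-start');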
@justinfagnani

justinfagnani commented Sep 15, 2016

Hey Paul, a question about:

Note: A mature implementation will do calls to measure() lazily, outside of the critical path.

Wouldn't a mature implementation of measure() do its work lazily, outside of the critical path? Why is user code in a better position to defer the expensive parts of measure()?

@paulirish
Author

@justinfagnani, I just ran the numbers and it looks like measure() is not at all a bottleneck. IMO not worth deferring it.

