benchmark: support for multiple http benchmarkers #8140
@@ -88,52 +88,157 @@ Benchmark.prototype._queue = function(options) {
  return queue;
};

function hasWrk() {
  const result = child_process.spawnSync('wrk', ['-h']);
  if (result.error && result.error.code === 'ENOENT') {
    console.error('Couldn\'t locate `wrk` which is needed for running ' +
                  'benchmarks. Check benchmark/README.md for further instructions.');
    process.exit(1);
  }

function AutocannonBenchmarker() {
Perhaps these abstractions should be moved to a separate file. I find this hard to read.
  const autocannon_exe = process.platform === 'win32'
    ? 'autocannon.cmd'
    : 'autocannon';
  this.present = function() {
    var result = child_process.spawnSync(autocannon_exe, ['-h']);
    if (result.error && result.error.code === 'ENOENT')
      return false;
    else
      return true;
  };
Perhaps simplify this to just: `return !(result.error && result.error.code === 'ENOENT');`
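For clarity, the simplified check suggested above would look roughly like this (a sketch of the suggestion, not the committed code):

```js
this.present = function() {
  const result = child_process.spawnSync(autocannon_exe, ['-h']);
  // present() is false only when the spawn itself failed because the
  // executable could not be found.
  return !(result.error && result.error.code === 'ENOENT');
};
```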
  this.create = function(path, duration, connections) {
    const args = ['-d', duration, '-c', connections, '-j', '-n',
                  'http://127.0.0.1:' + exports.PORT + path ];
    var child = child_process.spawn(autocannon_exe, args);
    child.stdout.setEncoding('utf8');
    return child;
  };
  this.processResults = function(output) {
    let result;
    try {
      result = JSON.parse(output);
    } catch (err) {
      // Do nothing, let next line handle this
    }
    if (!result || !result.requests || !result.requests.average) {
      return undefined;
    } else {
      return result.requests.average;
    }
  };
}
// benchmark an http server.
const WRK_REGEXP = /Requests\/sec:[ \t]+([0-9\.]+)/;
Benchmark.prototype.http = function(urlPath, args, cb) {
  hasWrk();
  const self = this;

  const urlFull = 'http://127.0.0.1:' + exports.PORT + urlPath;
  args = args.concat(urlFull);

  const childStart = process.hrtime();
  const child = child_process.spawn('wrk', args);
  child.stderr.pipe(process.stderr);
function WrkBenchmarker() {
  this.present = function() {
    var result = child_process.spawnSync('wrk', ['-h']);
    if (result.error && result.error.code === 'ENOENT')
      return false;
    else
      return true;
  };
  this.create = function(path, duration, connections) {
    const args = ['-d', duration, '-c', connections, '-t', 8,
                  'http://127.0.0.1:' + exports.PORT + path ];
    var child = child_process.spawn('wrk', args);
    child.stdout.setEncoding('utf8');
    child.stderr.pipe(process.stderr);
    return child;
  };
  const regexp = /Requests\/sec:[ \t]+([0-9\.]+)/;
  this.processResults = function(output) {
    const match = output.match(regexp);
    const result = match && +match[1];
    if (!result)
      return undefined;
    else
      return result;
  };
}
  // Collect stdout
  let stdout = '';
  child.stdout.on('data', (chunk) => stdout += chunk.toString());

const HTTPBenchmarkers = {
  autocannon: new AutocannonBenchmarker(),
  wrk: new WrkBenchmarker()
};
  child.once('close', function(code) {
    const elapsed = process.hrtime(childStart);
    if (cb) cb(code);

// Benchmark an http server.
Benchmark.prototype.http = function(urlPath, duration, connections, cb) {
  const self = this;
  duration = 1;

  const picked_benchmarker = process.env.NODE_HTTP_BENCHMARKER ||
What is the reason to run all benchmarks by default? It takes quite a long time to run the http benchmarks, I don't think we need to add more to that by default.

If you have both tools installed, then I assume you want to use both. As for the time: each HTTP benchmark run by those tools takes 10s. On my box all of the HTTP benchmarks take 14 minutes with 1 tool, and only 3 minutes more with both of them.

It sounds like something is wrong then. If each benchmarker takes 10 sec and it takes 14 min with one benchmarker, then it should take 28 min with two benchmarkers? I also disagree with the premise. The benchmarkers should be functionally equivalent, and should thus give similar results. If they give very different results (in a non-linearly proportional way) it sounds like something is wrong.

Just to clarify the issue here (summing up from #7180): a) we want to be able to run the http benchmarks on Windows too, and it seems extremely hard to get … @AndreasMadsen would you mind reviewing if …

To clarify: not all benchmarks in …

@bzoz please see #8139, the issue is that …

I'm all for adding autocannon for getting Windows support.

That is mostly an issue when continuously monitoring performance. When just comparing a benchmark between master and a PR, that shouldn't be a problem.

It's actually impossible from a philosophy of science perspective to show this, however we can validate it. First I run the … script: https://gist.github.com/AndreasMadsen/619e4a447b8df7043f32771e64b7693f From this result we can see that the benchmarker does not have a statistically significant effect on the output. We can see this because there are no stars right to … I guess one could do a more detailed analysis of the interactions between the benchmarker and the individual parameters, but those results are tricky to interpret because of paired correlation. I would say that since we can't observe a statistically significant effect we should assume that the benchmarker doesn't matter.

Maybe we should just pick the first that is available on the system, if it is not specified.

I'll do that, pick the first one available as default.

I think we should prefer …

For the record: I have done a crude interaction analysis, it turns out that the benchmarker does affect performance when looking at a specific set of parameters:

table: relative performance improvement of using autocannon.

However, this doesn't say anything about the benchmarker's ability to benchmark an optimization proposal. It just means that the benchmarkers aren't equally performant in all aspects. Really it is just optimization suggestions for the benchmarker implementers :) Anyway, it was just for the record. edit: perhaps it is possible to make an equal-variances test instead of an equal-means test, that should express the benchmark ability of the benchmarkers. It is not something I normally do, so I will have to look it up.
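As a rough sketch of the "pick the first available benchmarker" default discussed above (not the PR's actual implementation; the helper name and probing order are assumptions), the selection could look something like:

```js
// Sketch only: choose a default benchmarker by probing which tools are installed.
// Assumes the HTTPBenchmarkers map and the present() checks defined in this diff.
function firstAvailableBenchmarker(benchmarkers) {
  for (const name of Object.keys(benchmarkers)) {
    if (benchmarkers[name].present())
      return name;
  }
  return undefined; // nothing installed; the caller should report an error
}

// e.g.:
// const picked_benchmarker = process.env.NODE_HTTP_BENCHMARKER ||
//                            this.config.benchmarker ||
//                            firstAvailableBenchmarker(HTTPBenchmarkers);
```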
                             this.config.benchmarker || 'all';
  const benchmarkers = picked_benchmarker === 'all'
    ? Object.keys(HTTPBenchmarkers)
    : [picked_benchmarker];
  // See if any benchmarker is available. Also test if all used benchmarkers
  // are defined
  var any_available = false;
  for (var i = 0; i < benchmarkers.length; ++i) {
    const benchmarker = benchmarkers[i];
    const http_benchmarker = HTTPBenchmarkers[benchmarker];
    if (http_benchmarker === undefined) {
      console.error('Unknown http benchmarker: ', benchmarker);
      process.exit(1);
    }
    if (http_benchmarker.present()) {
      any_available = true;
    }
  }
  if (!any_available) {
    console.error('Couldn\'t locate any of the required http benchmarkers ' +
nit: s/Couldn't/Could not
                  '(' + benchmarkers.join(', ') + '). Check ' +
                  'benchmark/README.md for further instructions.');
    process.exit(1);
  }
    if (code) {
      console.error('wrk failed with ' + code);
      process.exit(code);

  function runHttpBenchmarker(index, collected_code) {
V8 optimizations are going to affect the next results if you don't restart node.js. I think it would be much better not to support this at all on the …

```js
var bench = common.createBenchmark(main, {
  num: [1, 4, 8, 16],
  size: [1, 64, 256],
  c: [100],
  benchmarker: ['wrk', 'autocannon'] /* will run both */
});

function main(conf) {
  bench.http({
    url: '/',
    duration: 10,
    connections: conf.c,
    benchmarker: conf.benchmarker
  }, function () { ... });
}
```

Good point. Just tested it, and the second run is up to 10% faster.
    // All benchmarkers executed
    if (index === benchmarkers.length) {
      if (cb)
        cb(collected_code);
      if (collected_code !== 0)
        process.exit(1);
      return;
    }

    // Extract requests pr second and check for odd results
    const match = stdout.match(WRK_REGEXP);
    if (!match || match.length <= 1) {
      console.error('wrk produced strange output:');
      console.error(stdout);
      process.exit(1);

    // Run next benchmarker
    const benchmarker = benchmarkers[index];
    self.config.benchmarker = benchmarker;

    const http_benchmarker = HTTPBenchmarkers[benchmarker];
    if (http_benchmarker.present()) {
      const child_start = process.hrtime();
      var child = http_benchmarker.create(urlPath, duration, connections);

      // Collect stdout
      let stdout = '';
      child.stdout.on('data', (chunk) => stdout += chunk.toString());

      child.once('close', function(code) {
        const elapsed = process.hrtime(child_start);
        if (code) {
          if (stdout === '') {
            console.error(benchmarker + ' failed with ' + code);
nit: `${benchmarker} failed with ${code}` (that is, using template strings here and elsewhere throughout)
          } else {
            console.error(benchmarker + ' failed with ' + code + '. Output: ');
            console.error(stdout);
          }
          runHttpBenchmarker(index + 1, code);
          return;
        }

        var result = http_benchmarker.processResults(stdout);
        if (!result) {
          console.error(benchmarker + ' produced strange output');
          console.error(stdout);
          runHttpBenchmarker(index + 1, 1);
          return;
        }

        self.report(result, elapsed);
        runHttpBenchmarker(index + 1, collected_code);
      });
    } else {
      runHttpBenchmarker(index + 1, collected_code);
    }
  }

    // Report rate
    self.report(+match[1], elapsed);
  });
  // Run with all benchmarkers
  runHttpBenchmarker(0, 0);
};

Benchmark.prototype._run = function() {
I don't think this is a good idea. The options to `createBenchmark` should just be the benchmark parameters. If you want this feature, I think it would be much better to add an option object to `bench.http`.
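For illustration, the option-object style being suggested for `bench.http` might look roughly like the following sketch; the option names and defaults here are assumptions, not the PR's final API:

```js
// Hypothetical sketch of an options-object API for Benchmark.prototype.http.
Benchmark.prototype.http = function(options, cb) {
  const urlPath = options.url || '/';
  const duration = options.duration || 10;          // seconds
  const connections = options.connections || 100;
  const benchmarker = options.benchmarker ||
                      process.env.NODE_HTTP_BENCHMARKER;
  // ...dispatch to the selected HTTP benchmarker, as in the diff above...
};

// Usage inside a benchmark's main(conf):
// bench.http({ url: '/', duration: 10, connections: conf.c }, function(code) {
//   // handle the benchmarker's exit code
// });
```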