benchmark: support for multiple http benchmarkers #8140

Closed

Changes from 8 commits
9 changes: 1 addition & 8 deletions Makefile
@@ -620,13 +620,6 @@ ifeq ($(XZ), 0)
ssh $(STAGINGSERVER) "touch nodejs/$(DISTTYPEDIR)/$(FULLVERSION)/node-$(FULLVERSION)-$(OSTYPE)-$(ARCH).tar.xz.done"
endif

haswrk=$(shell which wrk > /dev/null 2>&1; echo $$?)
wrk:
ifneq ($(haswrk), 0)
@echo "please install wrk before proceeding. More information can be found in benchmark/README.md." >&2
@exit 1
endif

bench-net: all
@$(NODE) benchmark/run.js net

@@ -636,7 +629,7 @@ bench-crypto: all
bench-tls: all
@$(NODE) benchmark/run.js tls

bench-http: wrk all
bench-http: all
@$(NODE) benchmark/run.js http

bench-fs: all
65 changes: 62 additions & 3 deletions benchmark/README.md
@@ -14,9 +14,25 @@ This folder contains benchmarks to measure the performance of the Node.js APIs.

## Prerequisites

Most of the http benchmarks require [`wrk`][wrk] to be installed. It may be
available through your preferred package manager. If not, `wrk` can be built
[from source][wrk] via `make`.
Most of the HTTP benchmarks require a benchmarker to be installed; this can
be either [`wrk`][wrk] or [`autocannon`][autocannon].

`autocannon` is a Node.js script that can be installed using
`npm install -g autocannon`. It uses the Node.js executable that is in the
path, so if you want to compare two HTTP benchmark runs, make sure that the
Node.js version in the path is not altered between them.

`wrk` may be available through your preferred package manager. If not, you can
easily build it [from source][wrk] via `make`.

By default `wrk` is used as the benchmarker. If it is not available,
`autocannon` is used in its place. When creating an HTTP benchmark you can
specify which benchmarker should be used, and you can force a specific one
by providing it as an argument, e.g.:

`node benchmark/run.js --set benchmarker=autocannon http`

`node benchmark/http/simple.js benchmarker=autocannon`
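
(The first form passes the choice to every benchmark started by the runner
via `--set`; the second sets it directly when running a single benchmark
file.)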

To analyze the results, `R` should be installed. Check your package manager or
download it from https://www.r-project.org/.
@@ -287,5 +303,48 @@ function main(conf) {
}
```

## Creating an HTTP benchmark

The `bench` object returned by `createBenchmark` implements an
`http(options, callback)` method. It can be used to run an external tool that
benchmarks HTTP servers.

```js
'use strict';

const common = require('../common.js');

const bench = common.createBenchmark(main, {
kb: [64, 128, 256, 1024],
connections: [100, 500]
});

function main(conf) {
const http = require('http');
const len = conf.kb * 1024;
const chunk = Buffer.alloc(len, 'x');
const server = http.createServer(function(req, res) {
res.end(chunk);
});

server.listen(common.PORT, function() {
bench.http({
connections: conf.connections,
}, function() {
server.close();
});
});
}
```

The supported option keys are listed below; a usage sketch follows the list.
* `port` - defaults to `common.PORT`
* `path` - defaults to `/`
* `connections` - number of concurrent connections to use, defaults to 100
* `duration` - duration of the benchmark in seconds, defaults to 10
* `benchmarker` - benchmarker to use, defaults to
`common.default_http_benchmarker`
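
For illustration, here is a variant of the example above that sets every
supported key explicitly (a sketch only; the values are arbitrary):

```js
'use strict';

const common = require('../common.js');

const bench = common.createBenchmark(main, {
  connections: [100, 500]
});

function main(conf) {
  const http = require('http');
  const server = http.createServer(function(req, res) {
    res.end('hello world\n');
  });

  server.listen(common.PORT, function() {
    bench.http({
      port: common.PORT,          // where the server above listens
      path: '/',                  // request path
      connections: conf.connections,
      duration: 5,                // seconds
      benchmarker: 'autocannon'   // force a specific benchmarker
    }, function() {
      server.close();
    });
  });
}
```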

[autocannon]: https://github.com/mcollina/autocannon
[wrk]: https://github.com/wg/wrk
[t-test]: https://en.wikipedia.org/wiki/Student%27s_t-test#Equal_or_unequal_sample_sizes.2C_unequal_variances
130 changes: 130 additions & 0 deletions benchmark/_http-benchmarkers.js
@@ -0,0 +1,130 @@
'use strict';

const child_process = require('child_process');

// The port used by servers and benchmarkers
exports.PORT = process.env.PORT || 12346;

function AutocannonBenchmarker() {
this.name = 'autocannon';
this.autocannon_exe = process.platform === 'win32'
? 'autocannon.cmd'
: 'autocannon';
const result = child_process.spawnSync(this.autocannon_exe, ['-h']);
this.present = !(result.error && result.error.code === 'ENOENT');
}

AutocannonBenchmarker.prototype.create = function(options) {
const args = ['-d', options.duration, '-c', options.connections, '-j', '-n',
`http://127.0.0.1:${options.port}${options.path}` ];
const child = child_process.spawn(this.autocannon_exe, args);
return child;
};

AutocannonBenchmarker.prototype.processResults = function(output) {
let result;
try {
result = JSON.parse(output);
} catch (err) {
// Do nothing, let next line handle this
}
if (!result || !result.requests || !result.requests.average) {
return undefined;
} else {
return result.requests.average;
}
};

function WrkBenchmarker() {
this.name = 'wrk';
this.regexp = /Requests\/sec:[ \t]+([0-9\.]+)/;
const result = child_process.spawnSync('wrk', ['-h']);
this.present = !(result.error && result.error.code === 'ENOENT');
}

WrkBenchmarker.prototype.create = function(options) {
const args = ['-d', options.duration, '-c', options.connections, '-t', 8,
`http://127.0.0.1:${options.port}${options.path}` ];
const child = child_process.spawn('wrk', args);
return child;
};

WrkBenchmarker.prototype.processResults = function(output) {
const match = output.match(this.regexp);
const result = match && +match[1];
if (!result) {
return undefined;
} else {
return result;
}
};
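
// For reference, wrk reports throughput with a line such as:
//   Requests/sec:  12345.67
// which the regexp above reduces to a single number.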

const http_benchmarkers = [ new WrkBenchmarker(),
new AutocannonBenchmarker() ];

const benchmarkers = {};

http_benchmarkers.forEach((benchmarker) => {
benchmarkers[benchmarker.name] = benchmarker;
if (!exports.default_http_benchmarker && benchmarker.present) {
exports.default_http_benchmarker = benchmarker.name;
}
});

exports.run = function(options, callback) {
options = Object.assign({
port: exports.PORT,
path: '/',
connections: 100,
duration: 10,
benchmarker: exports.default_http_benchmarker
}, options);
if (!options.benchmarker) {
callback(new Error('Could not locate any of the required http ' +
'benchmarkers. Check benchmark/README.md for further ' +
'instructions.'));
return;
}
var benchmarker = benchmarkers[options.benchmarker];
Review comment (Member): use const

if (!benchmarker) {
callback(new Error(`Requested benchmarker '${options.benchmarker}' is ` +
'not supported'));
return;
}
if (!benchmarker.present) {
callback(new Error(`Requested benchmarker '${options.benchmarker}' is ` +
'not installed'));
return;
}

const benchmarker_start = process.hrtime();

var child = benchmarker.create(options);
Review comment (Member): use const


child.stderr.pipe(process.stderr);

let stdout = '';
child.stdout.on('data', (chunk) => stdout += chunk.toString());

child.once('close', function(code) {
const elapsed = process.hrtime(benchmarker_start);
if (code) {
var error_message = `${options.benchmarker} failed with ${code}.`;
Review comment (Member): use let

if (stdout !== '') {
error_message += ` Output: ${stdout}`;
}
callback(new Error(error_message), code);
return;
}

const result = benchmarker.processResults(stdout);
if (!result) {
callback(new Error(`${options.benchmarker} produced strange output: ` +
stdout), code);
return;
}

callback(null, code, options.benchmarker, result, elapsed);
});

};
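
The two benchmarker objects above share an informal interface: a `name`, a
`present` flag probed via `spawnSync`, a `create(options)` method that spawns
the child process, and a `processResults(output)` method that extracts a
requests-per-second figure. As a hypothetical illustration (not part of this
PR), a third tool such as ApacheBench could be wired in the same way; the `ab`
flags and output format shown are assumptions to check against the tool's
documentation:

```js
'use strict';

const child_process = require('child_process');

// Hypothetical: an ApacheBench benchmarker following the same interface.
function AbBenchmarker() {
  this.name = 'ab';
  // Probe for the executable, mirroring the detection used above.
  const result = child_process.spawnSync('ab', ['-V']);
  this.present = !(result.error && result.error.code === 'ENOENT');
}

AbBenchmarker.prototype.create = function(options) {
  // -c: concurrent connections, -t: time limit in seconds.
  const args = ['-c', options.connections, '-t', options.duration,
                `http://127.0.0.1:${options.port}${options.path}`];
  return child_process.spawn('ab', args);
};

AbBenchmarker.prototype.processResults = function(output) {
  // ab prints e.g. "Requests per second:    1234.56 [#/sec] (mean)".
  const match = output.match(/Requests per second:[ \t]+([0-9.]+)/);
  return (match && +match[1]) || undefined;
};
```

Registering it would then just be a matter of appending `new AbBenchmarker()`
to the `http_benchmarkers` array above.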
89 changes: 36 additions & 53 deletions benchmark/common.js
@@ -1,17 +1,17 @@
'use strict';

const child_process = require('child_process');

// The port used by servers and wrk
exports.PORT = process.env.PORT || 12346;
const http_benchmarkers = require('./_http-benchmarkers.js');

exports.createBenchmark = function(fn, options) {
return new Benchmark(fn, options);
};

function Benchmark(fn, options) {
this.name = require.main.filename.slice(__dirname.length + 1);
this.options = this._parseArgs(process.argv.slice(2), options);
const parsed_args = this._parseArgs(process.argv.slice(2), options);
this.options = parsed_args.cli;
this.extra_options = parsed_args.extra;
Review comment (Member): I don't see any reason to introduce extra_options. The problem was just a bug in how the type was inferred, not how we handle options as a whole.

Reply (Contributor, Author): If we just add those to options they will be displayed when running other benchmarks, even unrelated ones. That does not look good and could be confusing, so I store those extra options elsewhere and apply them only to related benchmarks.

this.queue = this._queue(this.options);
this.config = this.queue[0];

@@ -29,7 +29,7 @@ function Benchmark(fn, options) {

Benchmark.prototype._parseArgs = function(argv, options) {
const cliOptions = Object.assign({}, options);

const extraOptions = {};
Review comment (Member): nit: avoid noisy style changes.

// Parse configuration arguments
for (const arg of argv) {
const match = arg.match(/^(.+?)=([\s\S]*)$/);
@@ -38,14 +38,16 @@ Benchmark.prototype._parseArgs = function(argv, options) {
process.exit(1);
}

// Infer the type from the options object and parse accordingly
const isNumber = typeof options[match[1]][0] === 'number';
const value = isNumber ? +match[2] : match[2];

cliOptions[match[1]] = [value];
if (options[match[1]]) {
// Infer the type from the options object and parse accordingly
const isNumber = typeof options[match[1]][0] === 'number';
const value = isNumber ? +match[2] : match[2];
cliOptions[match[1]] = [value];
} else {
extraOptions[match[1]] = match[2];
}
}

return cliOptions;
return { cli: cliOptions, extra: extraOptions };
};

Benchmark.prototype._queue = function(options) {
@@ -88,51 +90,29 @@
return queue;
};

function hasWrk() {
const result = child_process.spawnSync('wrk', ['-h']);
if (result.error && result.error.code === 'ENOENT') {
console.error('Couldn\'t locate `wrk` which is needed for running ' +
'benchmarks. Check benchmark/README.md for further instructions.');
process.exit(1);
}
}
// Benchmark an http server.
exports.default_http_benchmarker =
http_benchmarkers.default_http_benchmarker;
exports.PORT = http_benchmarkers.PORT;

// benchmark an http server.
const WRK_REGEXP = /Requests\/sec:[ \t]+([0-9\.]+)/;
Benchmark.prototype.http = function(urlPath, args, cb) {
hasWrk();
Benchmark.prototype.http = function(options, cb) {
const self = this;

const urlFull = 'http://127.0.0.1:' + exports.PORT + urlPath;
args = args.concat(urlFull);

const childStart = process.hrtime();
const child = child_process.spawn('wrk', args);
child.stderr.pipe(process.stderr);

// Collect stdout
let stdout = '';
child.stdout.on('data', (chunk) => stdout += chunk.toString());

child.once('close', function(code) {
const elapsed = process.hrtime(childStart);
if (cb) cb(code);

if (code) {
console.error('wrk failed with ' + code);
process.exit(code);
const http_options = Object.assign({ }, options);
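// Benchmarker precedence, as encoded in the chain below: an explicit option
// on this call wins, then the benchmark's own configuration, then a
// `benchmarker=` command line argument, and finally the detected default.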
http_options.benchmarker = http_options.benchmarker ||
self.config.benchmarker ||
self.extra_options.benchmarker ||
exports.default_http_benchmarker;
http_benchmarkers.run(http_options, function(error, code, used_benchmarker,
result, elapsed) {
if (cb) {
cb(code);
}

// Extract requests pr second and check for odd results
const match = stdout.match(WRK_REGEXP);
if (!match || match.length <= 1) {
console.error('wrk produced strange output:');
console.error(stdout);
process.exit(1);
if (error) {
console.error(error);
process.exit(code || 1);
}

// Report rate
self.report(+match[1], elapsed);
self.config.benchmarker = used_benchmarker;
Review comment (Member): I'm not sure if it's a good idea to mutate the config object.

Reply (Contributor, Author): This way the user does not need to pick a benchmarker - we will inject the default one here if none was selected. Besides logging, nothing more is done with the 'self' object from this point.

self.report(result, elapsed);
});
};

@@ -152,6 +132,9 @@ Benchmark.prototype._run = function() {
for (const key of Object.keys(config)) {
childArgs.push(`${key}=${config[key]}`);
}
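// Forward extra options (e.g. `benchmarker=autocannon`) to the forked child.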
for (const key of Object.keys(self.extra_options)) {
childArgs.push(`${key}=${self.extra_options[key]}`);
}

const child = child_process.fork(require.main.filename, childArgs, {
env: childEnv
6 changes: 3 additions & 3 deletions benchmark/http/chunked.js
@@ -20,8 +20,6 @@ function main(conf) {
const http = require('http');
var chunk = Buffer.alloc(conf.size, '8');

var args = ['-d', '10s', '-t', 8, '-c', conf.c];

var server = http.createServer(function(req, res) {
function send(left) {
if (left === 0) return res.end();
@@ -34,7 +32,9 @@
});

server.listen(common.PORT, function() {
bench.http('/', args, function() {
bench.http({
connections: conf.c
}, function() {
server.close();
});
});