diff --git a/.evergreen/config.yml b/.evergreen/config.yml
index 6f83976f..a043c638 100644
--- a/.evergreen/config.yml
+++ b/.evergreen/config.yml
@@ -113,6 +113,15 @@ functions:
         add_expansions_to_env: true
         args:
           - .evergreen/run-granular-benchmarks.sh
+  run custom benchmarks:
+    - command: subprocess.exec
+      type: test
+      params:
+        working_dir: src
+        binary: bash
+        add_expansions_to_env: true
+        args:
+          - .evergreen/run-custom-benchmarks.sh
   run spec benchmarks:
     - command: subprocess.exec
       type: test
@@ -246,6 +255,19 @@ tasks:
       - command: perf.send
         params:
           file: src/test/bench/etc/resultsCollectedMeans.json
+  - name: run-custom-benchmarks
+    commands:
+      - func: fetch source
+        vars:
+          # This needs to stay pinned at Node v18.16.0 for consistency across perf runs.
+          NODE_LTS_VERSION: v18.16.0
+      - func: install dependencies
+        vars:
+          NPM_VERSION: 9
+      - func: run custom benchmarks
+      - command: perf.send
+        params:
+          file: src/customBenchmarkResults.json
   - name: run-spec-benchmarks
     commands:
       - func: fetch source
@@ -300,3 +322,4 @@ buildvariants:
     tasks:
       - run-granular-benchmarks
       - run-spec-benchmarks
+      - run-custom-benchmarks
diff --git a/.evergreen/run-custom-benchmarks.sh b/.evergreen/run-custom-benchmarks.sh
new file mode 100644
index 00000000..2984cb35
--- /dev/null
+++ b/.evergreen/run-custom-benchmarks.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+source "${PROJECT_DIRECTORY}/.evergreen/init-node-and-npm-env.sh"
+set -o xtrace
+
+npm run check:custom-bench
diff --git a/.gitignore b/.gitignore
index 39b98b7b..b2ee050b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,3 +33,4 @@ docs/public
 
 .nvmrc
 benchmarks.json
+customBenchmarkResults.json
diff --git a/package.json b/package.json
index fd1fa890..6fed86fa 100644
--- a/package.json
+++ b/package.json
@@ -105,6 +105,7 @@
     "check:web-no-bigint": "WEB=true NO_BIGINT=true mocha test/node",
     "check:granular-bench": "npm run build:bench && node ./test/bench/etc/run_granular_benchmarks.js",
     "check:spec-bench": "npm run build:bench && node ./test/bench/lib/spec/bsonBench.js",
+    "check:custom-bench": "npm run build && node ./test/bench/custom/main.mjs",
     "build:bench": "cd test/bench && npx tsc",
     "build:ts": "node ./node_modules/typescript/bin/tsc",
     "build:dts": "npm run build:ts && api-extractor run --typescript-compiler-folder node_modules/typescript --local && node etc/clean_definition_files.cjs",
diff --git a/test/bench/custom/benchmarks.mjs b/test/bench/custom/benchmarks.mjs
new file mode 100644
index 00000000..74170fb9
--- /dev/null
+++ b/test/bench/custom/benchmarks.mjs
@@ -0,0 +1,22 @@
+/* eslint-disable strict */
+import { BSON } from '../../../lib/bson.mjs';
+
+const ObjectId_isValid = [
+  function objectid_isvalid_wrong_string_length() {
+    BSON.ObjectId.isValid('a');
+  },
+  /** wrong character at the start: likely the most short-circuited code path */
+  function objectid_isvalid_invalid_hex_at_start() {
+    BSON.ObjectId.isValid('g6e84ebdc96f4c0772f0cbbf');
+  },
+  /** wrong character at the end: likely the least short-circuited code path */
+  function objectid_isvalid_invalid_hex_at_end() {
+    BSON.ObjectId.isValid('66e84ebdc96f4c0772f0cbbg');
+  },
+  function objectid_isvalid_valid_hex_string() {
+    BSON.ObjectId.isValid('66e84ebdc96f4c0772f0cbbf');
+  }
+];
+
+// Add benchmarks here:
+export const benchmarks = [...ObjectId_isValid];
diff --git a/test/bench/custom/main.mjs b/test/bench/custom/main.mjs
new file mode 100644
index 00000000..b375418f
--- /dev/null
+++ b/test/bench/custom/main.mjs
@@ -0,0 +1,41 @@
+/* eslint-disable strict */
+
+import util from 'node:util';
+import fs from 'node:fs';
+import os from 'node:os';
+import benchmark from 'benchmark';
+import { benchmarks } from './benchmarks.mjs';
+
+const hw = os.cpus();
+const ram = os.totalmem() / 1024 ** 3;
+const platform = { name: hw[0].model, cores: hw.length, ram: `${ram}GB` };
+
+const systemInfo = () =>
+  [
+    `\n- cpu: ${platform.name}`,
+    `- cores: ${platform.cores}`,
+    `- arch: ${os.arch()}`,
+    `- os: ${process.platform} (${os.release()})`,
+    `- ram: ${platform.ram}\n`
+  ].join('\n');
+console.log(systemInfo());
+
+const suite = new benchmark.Suite();
+
+// Register each exported benchmark function as its own suite entry.
+for (const bench of benchmarks) suite.add(bench.name, bench);
+
+suite
+  .on('cycle', function logBenchmark(event) {
+    console.log(String(event.target));
+  })
+  .on('complete', function outputPerfSend() {
+    // Reshape the results into the perf.send format described in readme.md.
+    const data = Array.from(this).map(bench => ({
+      info: { test_name: bench.name },
+      metrics: [{ name: 'ops_per_sec', value: bench.hz }]
+    }));
+    console.log(util.inspect(data, { depth: Infinity, colors: true }));
+    fs.writeFileSync('customBenchmarkResults.json', JSON.stringify(data), 'utf8');
+  })
+  .run();
diff --git a/test/bench/custom/readme.md b/test/bench/custom/readme.md
new file mode 100644
index 00000000..ed69ae21
--- /dev/null
+++ b/test/bench/custom/readme.md
@@ -0,0 +1,51 @@
+# Custom Benchmark Tests
+
+This directory contains benchmarks for code paths that our spec and granular (de)serialization benchmarks do not cover.
+
+## How to write your own
+
+In `benchmarks.mjs`, add a new test to an existing array, or create a new array for a new subject area.
+Name the function in the format "subject area, method or function, test case covered" (e.g. `objectid_isvalid_bestcase_false`).
+Make sure your test is included in the `benchmarks` export.
+
+### Example
+
+```js
+const ObjectId_isValid = [
+  function objectid_isvalid_strlen() {
+    BSON.ObjectId.isValid('a');
+  },
+  // ...
+];
+
+export const benchmarks = [...ObjectId_isValid];
+```
+
+## Output
+
+The JSON emitted at the end of the benchmarks must follow our performance tracking format.
+
+The JSON must be an array of `Test` objects:
+
+```ts
+type Metric = { name: string, value: number }
+type Test = {
+  info: { test_name: string },
+  metrics: Metric[]
+}
+```
+
+The metric collected is always `ops_per_sec`, so higher is better.
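+
+Run `npm run check:custom-bench` from the repository root to build the library and execute the suite; results are written to `customBenchmarkResults.json`.
+
+For example, a results file with a single benchmark might look like this (the `value` is illustrative):
+
+```json
+[
+  {
+    "info": { "test_name": "objectid_isvalid_valid_hex_string" },
+    "metrics": [{ "name": "ops_per_sec", "value": 25000000 }]
+  }
+]
+```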