3 changes: 2 additions & 1 deletion bench/module-cost/.gitignore
@@ -1,3 +1,4 @@
 commonjs/*
 esm/*
-CPU*
+CPU*
+benchmark-results-*.json
5 changes: 5 additions & 0 deletions bench/module-cost/components/client.js
@@ -4,6 +4,11 @@ import { useEffect, useRef, useState } from 'react'
 import { format, measure } from '../lib/measure'

 function report(result, element, textarea) {
+  if (!globalThis.BENCHMARK_RESULTS) {
+    globalThis.BENCHMARK_RESULTS = []
+  }
+  globalThis.BENCHMARK_RESULTS.push(result)
+
   const formattedResult = format(result)
   element.textContent += `: ${formattedResult}`
   textarea.current.value += `\n ${formattedResult}`
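Note: the shape of each pushed result is not visible in this diff; judging from the fields that summarizeDurations reads in the runner below, an entry presumably looks like this (names and values illustrative only):

// Hypothetical result entry; the real object comes from measure() in lib/measure.
// { name: 'esm', loadDuration: 12.4, executeDuration: 3.1 }
//   name            -> which module variant was measured (assumed label)
//   loadDuration    -> time spent loading the module, in ms (assumed unit)
//   executeDuration -> time spent executing it, in ms (assumed unit)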
4 changes: 3 additions & 1 deletion bench/module-cost/package.json
@@ -2,6 +2,7 @@
"name": "module-cost",
"scripts": {
"prepare-bench": "node scripts/prepare-bench.mjs",
"benchmark": "node scripts/benchmark-runner.mjs",
"dev-webpack": "next dev",
"dev-turbopack": "next dev --turbo",
"build-webpack": "next build",
Expand All @@ -10,6 +11,7 @@
},
"devDependencies": {
"rimraf": "6.0.1",
"next": "workspace:*"
"next": "workspace:*",
"playwright": "^1.40.0"
}
}
255 changes: 255 additions & 0 deletions bench/module-cost/scripts/benchmark-runner.mjs
@@ -0,0 +1,255 @@
import { spawn } from 'node:child_process'
import { writeFileSync } from 'node:fs'
import { chromium } from 'playwright'

/// To use:
/// - Install Playwright: `npx playwright install chromium`
/// - Install dependencies: `pnpm install`
/// - Build the application: `pnpm build-webpack` or `pnpm build-turbopack`
/// - Run the benchmark: `pnpm benchmark`
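///
/// Positional arguments are optional (parsed at the bottom of this file): the
/// first is the sample count, the second a label for the results file. An
/// illustrative invocation (the "webpack" label is just an example name):
///   pnpm benchmark 20 webpack
/// writes benchmark-results-webpack-<timestamp>.json after 20 samples.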

class BenchmarkRunner {
  constructor(options) {
    this.name = options.name
    this.samples = options.samples ?? 50
    this.buttonClickDelay = options.buttonClickDelay ?? 500
    this.results = []
  }

  async runBenchmark() {
    for (let i = 1; i <= this.samples; i++) {
      console.log(`\n--- Running sample ${i}/${this.samples} ---`)

      const result = await this.runSingleSample()
      this.results.push(...result)
    }

    this.saveResults()
    console.log('\nBenchmark completed!')
  }

  async runSingleSample() {
    let server
    let browser

    try {
      // 1. Launch the server
      server = await this.startServer()

      // 2. Launch Chrome incognito
      console.log('Launching browser...')
      browser = await chromium.launch({
        headless: true, // Set to false to watch the browser while it runs
        args: ['--incognito'],
      })

      const context = await browser.newContext()
      const page = await context.newPage()

      // 3. Navigate to localhost:3000
      await page.goto('http://localhost:3000', { waitUntil: 'load' })

      // 4. Find and click all buttons
      const buttons = await page.locator('button').all()

      for (let j = 0; j < buttons.length; j++) {
        await buttons[j].click()
        await this.sleep(this.buttonClickDelay)
      }

      // 5. Capture data from the textbox
      console.log('Capturing data from the page...')
      const textboxData = await this.capturePageData(page)
      console.log('Captured data from the page:', textboxData)

      // 6. Close the browser
      console.log('Closing browser...')
      await browser.close()
      browser = null

      // 7. Shut down the server
      console.log('Shutting down server...')
      await this.stopServer(server)
      server = null

      return textboxData
    } catch (error) {
      // Clean up whatever is still running before rethrowing
      if (browser) {
        try {
          await browser.close()
        } catch (e) {
          console.error('Error closing browser:', e.message)
        }
      }
      if (server) {
        try {
          await this.stopServer(server)
        } catch (e) {
          console.error('Error stopping server:', e.message)
        }
      }
      throw error
    }
  }

  async startServer() {
    return new Promise((resolve, reject) => {
      const server = spawn('pnpm', ['start'], {
        stdio: ['pipe', 'pipe', 'pipe'],
        shell: true,
      })

      let serverReady = false

      server.stdout.on('data', (data) => {
        const output = data.toString()
        console.log('Server:', output.trim())

        // Look for common Next.js ready indicators
        if (
          output.includes('Ready') ||
          output.includes('started server') ||
          output.includes('Local:')
        ) {
          if (!serverReady) {
            serverReady = true
            resolve(server)
          }
        }
      })

      server.stderr.on('data', (data) => {
        console.error('Server Error:', data.toString().trim())
      })

      server.on('error', (error) => {
        reject(new Error(`Failed to start server: ${error.message}`))
      })

      server.on('close', (code) => {
        if (!serverReady) {
          reject(
            new Error(`Server exited with code ${code} before becoming ready`)
          )
        }
      })

      // Timeout after 30 seconds
      setTimeout(() => {
        if (!serverReady) {
          server.kill()
          reject(new Error('Server startup timeout'))
        }
      }, 30000)
    })
  }
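Readiness above is inferred from stdout strings, which can drift across Next.js versions. A hypothetical, more robust alternative is to poll the URL until it answers; a minimal sketch (not part of this diff), assuming the server listens on http://localhost:3000 and Node 18+ with global fetch:

// Sketch: resolve once the server answers HTTP, or throw after timeoutMs.
async function waitForServer(url = 'http://localhost:3000', timeoutMs = 30000) {
  const deadline = Date.now() + timeoutMs
  while (Date.now() < deadline) {
    try {
      const res = await fetch(url)
      if (res.ok) return
    } catch {
      // Not accepting connections yet; retry shortly.
    }
    await new Promise((r) => setTimeout(r, 250))
  }
  throw new Error('Server startup timeout')
}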

  async stopServer(server) {
    return new Promise((resolve) => {
      if (!server || server.killed) {
        resolve()
        return
      }

      server.on('close', () => {
        resolve()
      })

      // Try graceful shutdown first
      server.kill('SIGTERM')

      // Force kill after 5 seconds
      setTimeout(() => {
        if (!server.killed) {
          server.kill('SIGKILL')
        }
        resolve()
      }, 5000)
    })
  }

  async capturePageData(page) {
    return await page.evaluate(() => globalThis.BENCHMARK_RESULTS)
  }

  async sleep(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms))
  }

  saveResults() {
    const timestamp = new Date().toISOString().replace(/[:.]/g, '-')
    const filename = `benchmark-results-${this.name}-${timestamp}.json`
Review comment (Contributor):
The filename construction will include "undefined" when no benchmark name is provided via CLI arguments.

Analysis

In the saveResults() method, the filename is constructed using this.name which can be undefined when no name is passed via CLI arguments. This results in filenames like benchmark-results-undefined-2024-01-01T10-00-00-000Z.json, which is not user-friendly and could cause confusion when managing multiple benchmark runs.

The constructor assigns this.name = options.name without any fallback, and the CLI parsing at line 248 assigns name = args.length > 1 ? args[1] : undefined, so this.name will be undefined when no second CLI argument is provided.


Recommendation

Provide a default name when this.name is undefined. In the saveResults() method, replace line 182 with:

const filename = `benchmark-results-${this.name || 'default'}-${timestamp}.json`

Alternatively, set a default in the constructor:

this.name = options.name || 'default'


    writeFileSync(
      filename,
      JSON.stringify(summarizeDurations(this.results), null, 2)
    )
    console.log(`Results saved to ${filename}`)
  }
}
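For reference, the file written above contains the output of summarizeDurations below: an array with one summary per measured module name. A made-up example (names and numbers illustrative):

[
  {
    "name": "esm",
    "totalSamples": 50,
    "loadDuration": { "average": 12.1, "median": 11.8, "percentile75": 13.2 },
    "executeDuration": { "average": 3.4, "median": 3.2, "percentile75": 3.9 }
  }
]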

const summarizeDurations = (data) => {
  if (!Array.isArray(data) || data.length === 0) {
    throw new Error('No data to summarize')
  }

  const byName = new Map()
  for (const item of data) {
    const name = item.name
    if (!byName.has(name)) {
      byName.set(name, [])
    }
    byName.get(name).push(item)
  }

  const results = []
  for (const [name, items] of byName) {
    const loadDurations = items
      .map((item) => item.loadDuration)
      .sort((a, b) => a - b)
    const executeDurations = items
      .map((item) => item.executeDuration)
      .sort((a, b) => a - b)

    const getSummary = (durations) => {
      const sum = durations.reduce((acc, val) => acc + val, 0)
      const average = sum / durations.length

      const middle = Math.floor(durations.length / 2)
      const median =
        durations.length % 2 === 0
          ? (durations[middle - 1] + durations[middle]) / 2
          : durations[middle]

      const percentile75Index = Math.floor(durations.length * 0.75)
      const percentile75 = durations[percentile75Index]

      return {
        average,
        median,
        percentile75,
      }
    }

    results.push({
      name,
      totalSamples: items.length,
      loadDuration: getSummary(loadDurations),
      executeDuration: getSummary(executeDurations),
    })
  }

  return results
}
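A quick sanity check on the math with made-up numbers: for four sorted samples [1, 2, 3, 4], average = 10 / 4 = 2.5; the even-length branch gives median = (2 + 3) / 2 = 2.5; and percentile75 picks durations[Math.floor(4 * 0.75)] = durations[3] = 4.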

// CLI usage
const args = process.argv.slice(2)
const samples = args.length > 0 ? Number.parseInt(args[0]) : undefined
Comment on lines +246 to +247

The current parsing logic doesn't handle non-numeric inputs gracefully. If args[0] isn't a valid number, Number.parseInt() will return NaN which could cause unexpected behavior when used as the sample count.

Consider adding validation:

const parsedSamples = Number.parseInt(args[0]);
const samples = args.length > 0 && !isNaN(parsedSamples) ? parsedSamples : undefined;

This ensures the benchmark only uses numeric values for the sample count.

Suggested change
-const args = process.argv.slice(2)
-const samples = args.length > 0 ? Number.parseInt(args[0]) : undefined
+const args = process.argv.slice(2)
+const parsedSamples = Number.parseInt(args[0])
+const samples = args.length > 0 && !isNaN(parsedSamples) ? parsedSamples : undefined


const name = args.length > 1 ? args[1] : undefined

const runner = new BenchmarkRunner({
name,
samples,
})

runner.runBenchmark().catch(console.error)
3 changes: 3 additions & 0 deletions pnpm-lock.yaml

Some generated files are not rendered by default.
