# GitHub Actions workflow ("Test benchmarks pipeline", captured from run #27).
name: Benchmark

# Runs wrk-based benchmarks on both sides of a PR and posts a comparison table.
on:
  pull_request:
    types: [opened, synchronize, reopened, labeled]

# Least-privilege token: read the code, write PR comments.
permissions:
  contents: read
  pull-requests: write

jobs:
  benchmarks:
    # Only run when a maintainer applies the 'request-benchmarks' label.
    if: contains(github.event.pull_request.labels.*.name, 'request-benchmarks')
    runs-on: ubuntu-latest
    steps:
      - name: Install wrk
        run: |
          sudo apt-get update
          sudo apt-get install -y wrk

      # --- Source (PR head) branch ---
      - name: Checkout Source Branch
        # checkout@v2 runs on a deprecated Node runtime; v4 is the supported major.
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.ref }}
          path: source

      - name: Setup Node.js for Source Branch
        uses: actions/setup-node@v4
        with:
          node-version: '22.x'

      - name: Install Dependencies for Source Branch
        run: |
          cd source
          npm install

      - name: Run Benchmarks on Source Branch
        run: |
          cd source/benchmarks
          make > source_results.log

      - name: Save Source Results
        id: save-source-results
        # upload-artifact v1/v2 were shut down by GitHub; v4 is required.
        uses: actions/upload-artifact@v4
        with:
          name: source-results
          path: source/benchmarks/source_results.log

      # --- Target (PR base) branch ---
      - name: Checkout Target Branch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.base.ref }}
          path: target

      - name: Setup Node.js for Target Branch
        uses: actions/setup-node@v4
        with:
          node-version: '22.x'

      - name: Install Dependencies for Target Branch
        run: |
          cd target
          npm install

      - name: Run Benchmarks on Target Branch
        run: |
          cd target/benchmarks
          make > target_results.log

      - name: Save Target Results
        id: save-target-results
        uses: actions/upload-artifact@v4
        with:
          name: target-results
          path: target/benchmarks/target_results.log

      # Re-download both artifacts into predictable directories for the compare step.
      - name: Download Source Results
        uses: actions/download-artifact@v4
        with:
          name: source-results
          path: source-results

      - name: Download Target Results
        uses: actions/download-artifact@v4
        with:
          name: target-results
          path: target-results

      - name: Compare Results and Comment on PR
        uses: actions/github-script@v7
        env:
          # Exposed to the script as process.env.PR_NUMBER (github-script has
          # no `context.env` — the original `context.env.PR_NUMBER` was always
          # undefined, so the comment call could never succeed).
          PR_NUMBER: ${{ github.event.pull_request.number }}
        with:
          script: |
            const fs = require('fs');
            // Ensure both results files exist before attempting a comparison.
            if (!fs.existsSync('source-results/source_results.log') || !fs.existsSync('target-results/target_results.log')) {
              console.log("Results files not found!");
              return;
            }
            // Each non-empty line is expected to be "MetricName: Value".
            const sourceResults = fs.readFileSync('source-results/source_results.log', 'utf8').split('\n');
            const targetResults = fs.readFileSync('target-results/target_results.log', 'utf8').split('\n');
            // Build the Markdown comparison table.
            let commentBody = '### Benchmark Comparison\n';
            commentBody += '| Metric | Source Branch Value | Target Branch Value |\n';
            commentBody += '|--------|----------------------|----------------------|\n';
            // Metrics are assumed to appear in the same order on both branches.
            sourceResults.forEach((line, i) => {
              if (line.trim() === '') return; // skip empty lines
              const [sourceMetricName, sourceMetricValue] = line.split(':');
              // Guard: the target log may have fewer lines than the source log.
              const targetMetricValue = (targetResults[i] || '').split(':')[1] || 'n/a';
              commentBody += `| ${sourceMetricName} | ${sourceMetricValue} | ${targetMetricValue} |\n`;
            });
            // Post the comparison as a PR comment.
            await github.rest.issues.createComment({
              issue_number: Number(process.env.PR_NUMBER),
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: commentBody,
            });
        # Benchmark reporting is best-effort; don't fail the job over a comment.
        continue-on-error: true

      - name: Remove request-benchmarks Label
        # Strip the trigger label so benchmarks can be re-requested later.
        run: |
          curl -s -X DELETE -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
            -H "Content-Type: application/json" \
            "${{ github.event.pull_request.issue_url }}/labels/request-benchmarks"