feat: update the footer #491
name: Package Manager Benchmarks
on:
  push:
  workflow_dispatch:
    inputs:
      fixtures:
        description: 'The fixtures to run the benchmarks on'
        default: '["next", "astro", "vue", "svelte"]'
      variations:
        description: 'The benchmark variations to run'
        default: '["clean", "node_modules", "cache", "cache+node_modules", "cache+lockfile", "cache+lockfile+node_modules", "lockfile", "lockfile+node_modules"]'
      binaries:
        description: 'The binaries to run the benchmarks on'
        default: '"npm,yarn,berry,pnpm,vlt,bun,deno,nx,turbo,node"'
      warmup:
        description: 'The number of warmup runs on each benchmark'
        default: '2'
      runs:
        description: 'The number of runs on each benchmark'
        default: '10'
  schedule:
    - cron: "0 0 * * *"
# Prevent multiple runs from interfering with each other
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  benchmark:
    name: 'Run Benchmarks'
    runs-on: ubuntu-latest
    timeout-minutes: 60
    strategy:
      matrix:
        fixture: ${{ fromJson(inputs.fixtures || '["next", "astro", "vue", "svelte"]') }}
        variation: ${{ fromJson(inputs.variations || '["clean", "node_modules", "cache", "cache+node_modules", "cache+lockfile", "cache+lockfile+node_modules", "lockfile", "lockfile+node_modules"]') }}
        include:
          - variation: "run"
            fixture: "run"
    env:
      BENCH_INCLUDE: ${{ fromJson(inputs.binaries || '"npm,yarn,berry,pnpm,vlt,bun,deno,nx,turbo,node"') }}
      BENCH_WARMUP: ${{ inputs.warmup || '2' }}
      BENCH_RUNS: ${{ inputs.runs || '10' }}
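    # The BENCH_* variables above select which binaries to benchmark and how many
    # warmup and measured runs each benchmark gets (see the workflow_dispatch input
    # descriptions); they are presumably consumed by the setup and benchmark scripts below.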
    steps:
      - uses: actions/checkout@v4
      - name: Install Node
        uses: actions/setup-node@v4
        with:
          node-version: '24'
      - name: Install & Setup Tools
        run: |
          bash ./scripts/setup.sh
      - name: Run Benchmark Variations
        run: |
          if [ "${{ matrix.variation }}" = "run" ]; then
            bash ./scripts/benchmark.sh run run
          else
            bash ./scripts/benchmark.sh ${{ matrix.fixture }} ${{ matrix.variation }}
          fi
      - name: Upload Benchmark Results
        uses: actions/upload-artifact@v4
        with:
          name: results-${{ matrix.fixture }}-${{ matrix.variation }}
          path: ./results/${{ matrix.fixture }}/${{ matrix.variation }}/
          retention-days: 7
      - name: Upload Versions Info
        uses: actions/upload-artifact@v4
        with:
          name: versions-${{ matrix.fixture }}-${{ matrix.variation }}
          path: ./results/versions.json
          retention-days: 7
  process:
    name: 'Process Results'
    runs-on: ubuntu-latest
    needs: [benchmark]
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4
      - name: Install Node
        uses: actions/setup-node@v4
        with:
          node-version: '24'
      - name: Download Results
        uses: actions/download-artifact@v4
        with:
          path: results
          pattern: results-*
      - name: Download Versions
        uses: actions/download-artifact@v4
        with:
          path: versions-temp
          pattern: versions-*
      - name: Clean Benchmark Results
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            const path = require('path');
            // Define fixture and variation values from the strategy matrix
            const fixtures = [
              "next",
              "astro",
              "vue",
              "svelte",
              "run"
            ];
            const variations = [
              "cache",
              "cache+lockfile",
              "cache+node_modules",
              "cache+lockfile+node_modules",
              "clean",
              "lockfile",
              "lockfile+node_modules",
              "node_modules",
              "run"
            ];
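            // These lists mirror the workflow_dispatch defaults above plus the extra
            // "run" entry from the matrix include, and presumably need to stay in sync with them.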
            // Helper functions for statistical calculations
            function calculateMean(times) {
              return times.reduce((sum, time) => sum + time, 0) / times.length;
            }
            function calculateStddev(times, mean) {
              const variance = times.reduce((sum, time) => sum + Math.pow(time - mean, 2), 0) / times.length;
              return Math.sqrt(variance);
            }
            function calculateMedian(times) {
              const sorted = [...times].sort((a, b) => a - b);
              const mid = Math.floor(sorted.length / 2);
              return sorted.length % 2 === 0
                ? (sorted[mid - 1] + sorted[mid]) / 2
                : sorted[mid];
            }
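            // Example: for times [1, 2, 3, 10] these helpers return mean = 4,
            // median = 2.5, and a population stddev (dividing by N) of ~3.54.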
            // Clean benchmark results
            for (const fixture of fixtures) {
              for (const variation of variations) {
                // We only handle the single valid "run" combination, in which both the
                // fixture and the variation are named "run"; every other pairing is skipped.
                const skipInvalidRunFixtures = variation === "run" && fixture !== "run";
                const skipInvalidRunVariations = fixture === "run" && variation !== "run";
                if (skipInvalidRunFixtures || skipInvalidRunVariations) {
                  continue;
                }
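                // actions/download-artifact@v4 extracts each artifact into its own
                // subdirectory under `results/`, so each benchmark file lives at
                // results/results-<fixture>-<variation>/benchmarks.json.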
                const benchmarkPath = path.join('results', `results-${fixture}-${variation}`, 'benchmarks.json');
                try {
                  console.log(`Cleaning benchmark file: ${benchmarkPath}`);
                  const benchmarkData = JSON.parse(fs.readFileSync(benchmarkPath, 'utf8'));
                  if (benchmarkData.results && benchmarkData.results.length > 0) {
                    for (let i = 0; i < benchmarkData.results.length; i++) {
                      const result = benchmarkData.results[i];
                      const { times, exit_codes } = result;
                      if (times && exit_codes && times.length === exit_codes.length) {
                        // Filter out times where exit_codes is not 0
                        const cleanTimes = times.filter((time, index) => exit_codes[index] === 0);
                        const cleanExitCodes = exit_codes.filter(code => code === 0);
                        if (cleanTimes.length > 0) {
                          // Recalculate statistics
                          const mean = calculateMean(cleanTimes);
                          const stddev = calculateStddev(cleanTimes, mean);
                          const median = calculateMedian(cleanTimes);
                          const min = Math.min(...cleanTimes);
                          const max = Math.max(...cleanTimes);
                          // Update the result object
                          result.times = cleanTimes;
                          result.exit_codes = cleanExitCodes;
                          result.mean = mean;
                          result.stddev = stddev;
                          result.median = median;
                          result.min = min;
                          result.max = max;
                          console.log(`Cleaned ${fixture}-${variation} (result ${i}): ${times.length - cleanTimes.length} failed runs removed, ${cleanTimes.length} valid runs remaining`);
                        } else {
                          console.warn(`All runs failed for ${fixture}-${variation} (result ${i})`);
                        }
                      } else {
                        console.warn(`Invalid times/exit_codes arrays for ${fixture}-${variation} (result ${i})`);
                      }
                    }
                    // Save the cleaned data back to the file
                    fs.writeFileSync(benchmarkPath, JSON.stringify(benchmarkData, null, 2));
                  } else {
                    console.warn(`No results found in ${benchmarkPath}`);
                  }
                } catch (error) {
                  console.error(`Failed to clean ${benchmarkPath}: ${error.message}`);
                }
              }
            }
            console.log('Benchmark cleaning completed');
      - name: Copy Versions File
        run: |
          # Find any versions.json file from the downloaded versions artifacts and copy it to results
          find versions-temp -name "versions.json" -type f | head -1 | xargs -I {} cp {} ./results/versions.json || echo "No versions.json found"
      - name: Process Results
        run: |
          bash ./scripts/process-results.sh
      - name: Install vlt
        run: |
          npm install -g vlt@latest
      - name: Build Charts View
        run: |
          pushd app
          vlt install || true
          vlt run build
          popd
      - name: Upload Processed Results
        uses: actions/upload-artifact@v4
        with:
          name: results
          path: results/
          retention-days: 7
  deploy:
    name: 'Deploy Results'
    runs-on: ubuntu-latest
    needs: [process]
    permissions:
      contents: write
    if: github.ref == 'refs/heads/main'
    steps:
      - uses: actions/checkout@v4
      - name: Download Results
        uses: actions/download-artifact@v4
        with:
          name: results
          path: results/
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: results
          keep_files: true