diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 0000000000..167b5563bf
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,503 @@
+name: Benchmark Workflow
+
+on:
+  # https://github.com/mxschmitt/action-tmate?tab=readme-ov-file#manually-triggered-debug
+  workflow_dispatch:
+    inputs:
+      debug_enabled:
+        description: 'Enable SSH access (⚠️ Security Risk - read workflow comments)'
+        required: false
+        default: false
+        type: boolean
+      routes:
+        description: 'Comma-separated routes to benchmark (e.g., "/,/hello"). Leave empty to auto-detect from Rails.'
+        required: false
+        type: string
+      rate:
+        description: 'Requests per second (use "max" for maximum throughput)'
+        required: false
+        default: 'max'
+        type: string
+      duration:
+        description: 'Duration (e.g., "30s", "1m", "90s")'
+        required: false
+        default: '30s'
+        type: string
+      request_timeout:
+        description: 'Request timeout (e.g., "60s", "1m", "90s")'
+        required: false
+        default: '60s'
+        type: string
+      connections:
+        description: 'Concurrent connections/virtual users (also used as max)'
+        required: false
+        default: 10
+        type: number
+      web_concurrency:
+        description: 'Number of Puma worker processes'
+        required: false
+        default: 4
+        type: number
+      rails_threads:
+        description: 'Number of Puma threads (min and max will be the same)'
+        required: false
+        default: 3
+        type: number
+      tools:
+        description: 'Comma-separated list of tools to run'
+        required: false
+        default: 'fortio,vegeta,k6'
+        type: string
+      app_version:
+        description: 'Which app version to benchmark'
+        required: false
+        default: 'both'
+        type: choice
+        options:
+          - 'both'
+          - 'core_only'
+          - 'pro_only'
+  push:
+    branches:
+      - master
+  pull_request:
+
+env:
+  FORTIO_VERSION: "1.73.0"
+  K6_VERSION: "1.3.0"
+  VEGETA_VERSION: "12.13.0"
+  # Determine which apps to run (default is 'both' for all triggers)
+  RUN_CORE: ${{ (github.event.inputs.app_version || 'both') != 'pro_only' && 'true' || '' }}
+  RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }}
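+  # GitHub Actions expressions have no ternary operator, so `cond && 'true' || ''`
+  # emulates one: RUN_CORE/RUN_PRO are 'true' when enabled and the empty string
+  # (which is falsy in `if:` conditions) otherwise.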
+  # Benchmark parameters (defaults in bench.rb unless overridden here for CI)
+  ROUTES: ${{ github.event.inputs.routes }}
+  RATE: ${{ github.event.inputs.rate || 'max' }}
+  DURATION: ${{ github.event.inputs.duration }}
+  REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }}
+  CONNECTIONS: ${{ github.event.inputs.connections }}
+  MAX_CONNECTIONS: ${{ github.event.inputs.connections }}
+  WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }}
+  RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }}
+  RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }}
+  # Default must be set here too: the tool install steps below check this value,
+  # and on push/pull_request triggers the inputs are empty.
+  TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }}
+
+jobs:
+  benchmark:
+    runs-on: ubuntu-latest
+    env:
+      SECRET_KEY_BASE: 'dummy-secret-key-for-ci-testing-not-used-in-production'
+      REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE_V2 }}
+
+    steps:
+      # ============================================
+      # STEP 1: CHECKOUT CODE
+      # ============================================
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      # ============================================
+      # STEP 2: OPTIONAL SSH ACCESS
+      # ============================================
+      # NOTE: Interactive confirmation is not possible in GitHub Actions.
+      # As a secure workaround, SSH access is gated by the workflow_dispatch
+      # input variable 'debug_enabled', which defaults to false.
+      # Users must explicitly set this to true to enable SSH.
+
+      - name: SSH Warning
+        if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }}
+        run: |
+          echo "⚠️ ⚠️ ⚠️ SSH ACCESS ENABLED ⚠️ ⚠️ ⚠️"
+          echo ""
+          echo "SECURITY NOTICE:"
+          echo "  - SSH access exposes your GitHub Actions runner"
+          echo "  - Only proceed if you understand and accept the risks"
+          echo "  - Do NOT store secrets or sensitive data on the runner"
+          echo "  - Access is limited to the workflow initiator only"
+          echo "  - The session will remain open until manually terminated"
+          echo ""
+          echo "⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️"
+
+      - name: Setup SSH access (if enabled)
+        if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }}
+        uses: mxschmitt/action-tmate@v3
+        with:
+          detached: true
+          limit-access-to-actor: true # Only the workflow initiator can access
+
+      # ============================================
+      # STEP 3: INSTALL BENCHMARKING TOOLS
+      # ============================================
+
+      - name: Add tools directory to PATH
+        run: |
+          mkdir -p ~/bin
+          echo "$HOME/bin" >> $GITHUB_PATH
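+
+      # Directories added to $GITHUB_PATH take effect in subsequent steps,
+      # so ~/bin is on PATH for every install and benchmark step below.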
+      - name: Cache Fortio binary
+        id: cache-fortio
+        if: contains(env.TOOLS, 'fortio')
+        uses: actions/cache@v4
+        with:
+          path: ~/bin/fortio
+          key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }}
+
+      - name: Install Fortio
+        if: contains(env.TOOLS, 'fortio') && steps.cache-fortio.outputs.cache-hit != 'true'
+        run: |
+          echo "📦 Installing Fortio v${FORTIO_VERSION}"
+
+          # Download and extract fortio binary
+          wget -q https://github.com/fortio/fortio/releases/download/v${FORTIO_VERSION}/fortio-linux_amd64-${FORTIO_VERSION}.tgz
+          tar -xzf fortio-linux_amd64-${FORTIO_VERSION}.tgz
+
+          # Store in cache directory
+          mv usr/bin/fortio ~/bin/
+
+      - name: Cache Vegeta binary
+        id: cache-vegeta
+        if: contains(env.TOOLS, 'vegeta')
+        uses: actions/cache@v4
+        with:
+          path: ~/bin/vegeta
+          key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }}
+
+      - name: Install Vegeta
+        if: contains(env.TOOLS, 'vegeta') && steps.cache-vegeta.outputs.cache-hit != 'true'
+        run: |
+          echo "📦 Installing Vegeta v${VEGETA_VERSION}"
+
+          # Download and extract vegeta binary
+          wget -q https://github.com/tsenart/vegeta/releases/download/v${VEGETA_VERSION}/vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz
+          tar -xzf vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz
+
+          # Store in cache directory
+          mv vegeta ~/bin/
+
+      - name: Setup k6
+        if: contains(env.TOOLS, 'k6')
+        uses: grafana/setup-k6-action@v1
+        with:
+          k6-version: ${{ env.K6_VERSION }}
+
+      # ============================================
+      # STEP 4: START APPLICATION SERVER
+      # ============================================
+
+      - name: Setup Ruby
+        uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: '3.3.7'
+          bundler: 2.5.4
+
+      - name: Install libyaml-dev
+        run: sudo apt install libyaml-dev -y
+
+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: '22'
+          cache: yarn
+          cache-dependency-path: '**/yarn.lock'
+
+      - name: Print system information
+        run: |
+          echo "Linux release: "; cat /etc/issue
+          echo "Current user: "; whoami
+          echo "Current directory: "; pwd
+          echo "Ruby version: "; ruby -v
+          echo "Node version: "; node -v
+          echo "Yarn version: "; yarn --version
+          echo "Bundler version: "; bundle --version
+
+      - name: Install Node modules with Yarn for renderer package
+        run: |
+          yarn install --no-progress --no-emoji --frozen-lockfile
+          npm install --global yalc
+
+      - name: yalc publish for react-on-rails
+        run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish
+
+      - name: yalc add react-on-rails
+        if: env.RUN_CORE
+        run: cd spec/dummy && yalc add react-on-rails
+
+      - name: Install Node modules with Yarn for Core dummy app
+        if: env.RUN_CORE
+        run: cd spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji
+
+      - name: Save Core dummy app ruby gems to cache
+        if: env.RUN_CORE
+        uses: actions/cache@v4
+        with:
+          path: spec/dummy/vendor/bundle
+          key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }}
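+
+      # `bundle _2.5.4_ install` below runs that exact Bundler version (the same
+      # one pinned in the Setup Ruby step above).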
+
+      - name: Install Ruby Gems for Core dummy app
+        if: env.RUN_CORE
+        run: |
+          cd spec/dummy
+          bundle config set path vendor/bundle
+          bundle config set frozen true
+          bundle _2.5.4_ install --jobs=4 --retry=3
+
+      - name: Prepare Core production assets
+        if: env.RUN_CORE
+        run: |
+          set -e # Exit on any error
+          echo "🔨 Building production assets..."
+          cd spec/dummy
+
+          if ! bin/prod-assets; then
+            echo "❌ ERROR: Failed to build production assets"
+            exit 1
+          fi
+
+          echo "✅ Production assets built successfully"
+
+      - name: Start Core production server
+        if: env.RUN_CORE
+        run: |
+          set -e # Exit on any error
+          echo "🚀 Starting production server..."
+          cd spec/dummy
+
+          # Start server in background
+          bin/prod &
+          echo "Server started in background"
+
+          # Wait for server to be ready (max 30 seconds)
+          echo "⏳ Waiting for server to be ready..."
+          for i in {1..30}; do
+            if curl -fsS http://localhost:3001 > /dev/null; then
+              echo "✅ Server is ready and responding"
+              exit 0
+            fi
+            echo "  Attempt $i/30: Server not ready yet..."
+            sleep 1
+          done
+
+          echo "❌ ERROR: Server failed to start within 30 seconds"
+          exit 1
+
+      # ============================================
+      # STEP 5: RUN CORE BENCHMARKS
+      # ============================================
+
+      - name: Execute Core benchmark suite
+        if: env.RUN_CORE
+        timeout-minutes: 120
+        run: |
+          set -e # Exit on any error
+          echo "🏃 Running Core benchmark suite..."
+
+          if ! ruby spec/performance/bench.rb; then
+            echo "❌ ERROR: Benchmark execution failed"
+            exit 1
+          fi
+
+          echo "✅ Benchmark suite completed successfully"
+
+      - name: Stop Core production server
+        if: env.RUN_CORE && always()
+        run: |
+          echo "🛑 Stopping Core production server..."
+          # Find and kill the Puma process on port 3001
+          if lsof -ti:3001 > /dev/null 2>&1; then
+            kill $(lsof -ti:3001) || true
+            echo "✅ Server stopped"
+          else
+            echo "ℹ️ No server running on port 3001"
+          fi
+
+      - name: Validate Core benchmark results
+        if: env.RUN_CORE
+        run: |
+          set -e
+          echo "🔍 Validating benchmark results..."
+
+          if [ ! -f "bench_results/summary.txt" ]; then
+            echo "❌ ERROR: benchmark summary file not found"
+            exit 1
+          fi
+
+          echo "✅ Benchmark results found"
+          echo ""
+          echo "📊 Summary:"
+          column -t -s $'\t' bench_results/summary.txt
+          echo ""
+          echo "Generated files:"
+          ls -lh bench_results/
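+
+      # `always()` makes the upload run even when the benchmark step failed,
+      # so partial results and logs are still preserved as artifacts.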
+
+      - name: Upload Core benchmark results
+        uses: actions/upload-artifact@v4
+        if: env.RUN_CORE && always()
+        with:
+          name: benchmark-core-results-${{ github.run_number }}
+          path: bench_results/
+          retention-days: 30
+          if-no-files-found: warn
+
+      # ============================================
+      # STEP 6: SETUP PRO APPLICATION SERVER
+      # ============================================
+      - name: Cache Pro package node modules
+        if: env.RUN_PRO
+        uses: actions/cache@v4
+        with:
+          path: react_on_rails_pro/node_modules
+          key: v4-pro-package-node-modules-cache-${{ hashFiles('react_on_rails_pro/yarn.lock') }}
+
+      - name: Cache Pro dummy app node modules
+        if: env.RUN_PRO
+        uses: actions/cache@v4
+        with:
+          path: react_on_rails_pro/spec/dummy/node_modules
+          key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/yarn.lock') }}
+
+      - name: Cache Pro dummy app Ruby gems
+        if: env.RUN_PRO
+        uses: actions/cache@v4
+        with:
+          path: react_on_rails_pro/spec/dummy/vendor/bundle
+          key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }}
+
+      - name: Install Node modules with Yarn for Pro package
+        if: env.RUN_PRO
+        run: |
+          cd react_on_rails_pro
+          sudo yarn global add yalc
+          yarn install --frozen-lockfile --no-progress --no-emoji
+
+      - name: Install Node modules with Yarn for Pro dummy app
+        if: env.RUN_PRO
+        run: cd react_on_rails_pro/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji
+
+      - name: Install Ruby Gems for Pro dummy app
+        if: env.RUN_PRO
+        run: |
+          cd react_on_rails_pro/spec/dummy
+          bundle config set path vendor/bundle
+          bundle config set frozen true
+          bundle _2.5.4_ install --jobs=4 --retry=3
+
+      - name: Generate file-system based entrypoints for Pro
+        if: env.RUN_PRO
+        run: cd react_on_rails_pro/spec/dummy && bundle exec rake react_on_rails:generate_packs
+
+      - name: Prepare Pro production assets
+        if: env.RUN_PRO
+        run: |
+          set -e
+          echo "🔨 Building Pro production assets..."
+          cd react_on_rails_pro/spec/dummy
+
+          if ! bin/prod-assets; then
+            echo "❌ ERROR: Failed to build production assets"
+            exit 1
+          fi
+
+          echo "✅ Production assets built successfully"
+
+      - name: Start Pro production server
+        if: env.RUN_PRO
+        run: |
+          set -e
+          echo "🚀 Starting Pro production server..."
+          cd react_on_rails_pro/spec/dummy
+
+          # Start server in background
+          bin/prod &
+          echo "Server started in background"
+
+          # Wait for server to be ready (max 30 seconds)
+          echo "⏳ Waiting for server to be ready..."
+          for i in {1..30}; do
+            if curl -fsS http://localhost:3001 > /dev/null; then
+              echo "✅ Server is ready and responding"
+              exit 0
+            fi
+            echo "  Attempt $i/30: Server not ready yet..."
+            sleep 1
+          done
+
+          echo "❌ ERROR: Server failed to start within 30 seconds"
+          exit 1
+
+      # ============================================
+      # STEP 7: RUN PRO BENCHMARKS
+      # ============================================
+
+      - name: Execute Pro benchmark suite
+        if: env.RUN_PRO
+        timeout-minutes: 120
+        run: |
+          set -e
+          echo "🏃 Running Pro benchmark suite..."
+
+          if ! PRO=true ruby spec/performance/bench.rb; then
+            echo "❌ ERROR: Benchmark execution failed"
+            exit 1
+          fi
+
+          echo "✅ Benchmark suite completed successfully"
+
+      - name: Stop Pro production server
+        if: env.RUN_PRO && always()
+        run: |
+          echo "🛑 Stopping Pro production server..."
+          # Find and kill the Puma process on port 3001
+          if lsof -ti:3001 > /dev/null 2>&1; then
+            kill $(lsof -ti:3001) || true
+            echo "✅ Server stopped"
+          else
+            echo "ℹ️ No server running on port 3001"
+          fi
+
+      - name: Validate Pro benchmark results
+        if: env.RUN_PRO
+        run: |
+          set -e
+          echo "🔍 Validating benchmark results..."
+
+          if [ ! -f "bench_results/summary.txt" ]; then
+            echo "❌ ERROR: benchmark summary file not found"
+            exit 1
+          fi
+
+          echo "✅ Benchmark results found"
+          echo ""
+          echo "📊 Summary:"
+          column -t -s $'\t' bench_results/summary.txt
+          echo ""
+          echo "Generated files:"
+          ls -lh bench_results/
+
+      - name: Upload Pro benchmark results
+        uses: actions/upload-artifact@v4
+        if: env.RUN_PRO && always()
+        with:
+          name: benchmark-pro-results-${{ github.run_number }}
+          path: bench_results/
+          retention-days: 30
+          if-no-files-found: warn
+
+      # ============================================
+      # STEP 8: WORKFLOW COMPLETION
+      # ============================================
+      - name: Workflow summary
+        if: always()
+        run: |
+          echo "📋 Benchmark Workflow Summary"
+          echo "===================================="
+          echo "Status: ${{ job.status }}"
+          echo "Run number: ${{ github.run_number }}"
+          echo "Triggered by: ${{ github.actor }}"
+          echo "Branch: ${{ github.ref_name }}"
+          echo "Run Core: ${{ env.RUN_CORE }}"
+          echo "Run Pro: ${{ env.RUN_PRO }}"
+          echo ""
+          if [ "${{ job.status }}" == "success" ]; then
+            echo "✅ All steps completed successfully"
+          else
+            echo "❌ Workflow encountered errors - check logs above"
+          fi
diff --git a/.gitignore b/.gitignore
index 3f63eaf013..5b838f6567 100644
--- a/.gitignore
+++ b/.gitignore
@@ -67,6 +67,9 @@
 yalc.lock
 /spec/dummy/.bsb.lock
 /spec/dummy/**/*.res.js
 
+# Performance test results
+/bench_results
+
 # Generated by ROR FS-based Registry
 generated
diff --git a/CLAUDE.md b/CLAUDE.md
index fa18a77374..9ab9ee6c2b 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -40,6 +40,8 @@ Pre-commit hooks automatically run:
 - All linters: `rake lint` (runs ESLint and RuboCop)
 - ESLint only: `yarn run lint` or `rake lint:eslint`
 - RuboCop only: `rake lint:rubocop`
+  - GitHub Actions files (workflows, reusable actions, etc.): `actionlint`
+  - YAML files: `yamllint` (or validate the syntax with Ruby if it isn't installed). Do _not_ try to run RuboCop on `.yml` files.
 - **Code Formatting**:
   - Format code with Prettier: `rake autofix`
   - Check formatting without fixing: `yarn start format.listDifferent`
diff --git a/knip.ts b/knip.ts
index d7047863b6..0bebe2c9b6 100644
--- a/knip.ts
+++ b/knip.ts
@@ -10,11 +10,14 @@ const config: KnipConfig = {
   ignoreBinaries: [
     // Has to be installed globally
     'yalc',
+    'ruby',
     // Used in package.json scripts (devDependency, so unlisted in production mode)
     'nps',
     // Pro package binaries used in Pro workflows
     'playwright',
     'e2e-test',
+    // Local binaries
+    'bin/.*',
   ],
   ignore: ['react_on_rails_pro/**'],
   ignoreDependencies: [
diff --git a/react_on_rails_pro/Gemfile.development_dependencies b/react_on_rails_pro/Gemfile.development_dependencies
index 92c9ead62b..83bd149b7e 100644
--- a/react_on_rails_pro/Gemfile.development_dependencies
+++ b/react_on_rails_pro/Gemfile.development_dependencies
@@ -15,12 +15,9 @@ gem "puma", "~> 6"
 # Build JSON APIs with ease. Read more: https://github.com/rails/jbuilder
 gem "jbuilder"
-gem "pg"
-
 # Turbolinks makes following links in your web application faster. Read more: https://github.com/rails/turbolinks
 gem "turbolinks"
 
 gem "sqlite3", "~> 1.4"
-gem "uglifier"
 gem "jquery-rails"
 gem "sprockets"
 gem "sass-rails"
diff --git a/react_on_rails_pro/Gemfile.lock b/react_on_rails_pro/Gemfile.lock
index bc8fc6a08c..82885179ed 100644
--- a/react_on_rails_pro/Gemfile.lock
+++ b/react_on_rails_pro/Gemfile.lock
@@ -233,7 +233,6 @@ GEM
     parser (3.3.3.0)
       ast (~> 2.4.1)
       racc
-    pg (1.5.6)
     pp (0.6.2)
       prettyprint
     prettyprint (0.2.0)
@@ -414,8 +413,6 @@ GEM
     turbolinks-source (5.2.0)
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
-    uglifier (4.2.0)
-      execjs (>= 0.3.0, < 3)
     unicode-display_width (2.5.0)
     uri (1.0.3)
     useragent (0.16.11)
@@ -468,7 +465,6 @@ DEPENDENCIES
   net-http
   net-imap
   net-smtp
-  pg
   pry (>= 0.14.1)
   pry-byebug!
   pry-doc
@@ -493,7 +489,6 @@ DEPENDENCIES
   sprockets
   sqlite3 (~> 1.4)
   turbolinks
-  uglifier
   web-console
   webdrivers (= 5.3.0)
   webmock
diff --git a/react_on_rails_pro/spec/dummy/Gemfile.lock b/react_on_rails_pro/spec/dummy/Gemfile.lock
index d0f9d868a6..9693d16731 100644
--- a/react_on_rails_pro/spec/dummy/Gemfile.lock
+++ b/react_on_rails_pro/spec/dummy/Gemfile.lock
@@ -258,7 +258,6 @@ GEM
     parser (3.3.3.0)
       ast (~> 2.4.1)
       racc
-    pg (1.5.6)
     pp (0.6.2)
       prettyprint
     prettyprint (0.2.0)
@@ -450,8 +449,6 @@ GEM
     turbolinks-source (5.2.0)
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
-    uglifier (4.2.0)
-      execjs (>= 0.3.0, < 3)
     unicode-display_width (2.5.0)
     uri (1.0.3)
     useragent (0.16.11)
@@ -515,7 +512,6 @@ DEPENDENCIES
   net-http
   net-imap
   net-smtp
-  pg
   prism-rails
   pry (>= 0.14.1)
   pry-byebug!
@@ -542,7 +538,6 @@ DEPENDENCIES
   sprockets
   sqlite3 (~> 1.4)
   turbolinks
-  uglifier
   web-console
   webdrivers (= 5.3.0)
   webmock
diff --git a/react_on_rails_pro/spec/dummy/bin/prod b/react_on_rails_pro/spec/dummy/bin/prod
new file mode 100755
index 0000000000..35d0d355ce
--- /dev/null
+++ b/react_on_rails_pro/spec/dummy/bin/prod
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# Run only after ./bin/prod-assets
+
+# Check if assets are precompiled
+MANIFEST="public/webpack/production/manifest.json"
+
+if [ ! -d "public/assets" ]; then
+  echo "ERROR: public/assets not found. Run ./bin/prod-assets first"
+  exit 1
+fi
+
+if [ ! -f "$MANIFEST" ]; then
+  echo "ERROR: $MANIFEST not found. Run ./bin/prod-assets first"
+  exit 1
+fi
+
+# Simple up-to-date check: warn if source files are newer than manifest.json
+if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then
+  echo "WARNING: client or config has changes newer than compiled assets"
+  echo "Consider running ./bin/prod-assets to rebuild"
+fi
+
+if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then
+  echo "WARNING: yarn.lock is newer than compiled assets"
+  echo "Consider running ./bin/prod-assets to rebuild"
+fi
+
+NODE_ENV=production RAILS_ENV=production bundle exec rails server -p 3001
diff --git a/react_on_rails_pro/spec/dummy/bin/prod-assets b/react_on_rails_pro/spec/dummy/bin/prod-assets
new file mode 100755
index 0000000000..828b1e6ae8
--- /dev/null
+++ b/react_on_rails_pro/spec/dummy/bin/prod-assets
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+export NODE_ENV=production
+export RAILS_ENV=production
+if [ "$CI" = "true" ]; then
+  bundle exec bootsnap precompile --gemfile app/ lib/ config/
+fi
+bundle exec rails react_on_rails:generate_packs
+bundle exec rails assets:precompile
diff --git a/react_on_rails_pro/spec/dummy/config/environments/production.rb b/react_on_rails_pro/spec/dummy/config/environments/production.rb
index 45a1d5f576..d2b312e1fd 100644
--- a/react_on_rails_pro/spec/dummy/config/environments/production.rb
+++ b/react_on_rails_pro/spec/dummy/config/environments/production.rb
@@ -19,8 +19,9 @@
   config.public_file_server.enabled = true
 
   # Compress JavaScripts and CSS.
-  config.assets.js_compressor = Uglifier.new(harmony: true)
-  config.assets.css_compressor = :csso
+  # JS/CSS compression handled by Webpack/Shakapacker, not needed for Sprockets
+  # config.assets.js_compressor = Uglifier.new(harmony: true)
+  # config.assets.css_compressor = :csso
 
   # Do not fallback to assets pipeline if a precompiled asset is missed.
   config.assets.compile = false
@@ -69,7 +70,7 @@
   config.active_support.deprecation = :notify
 
   # Use default logging formatter so that PID and timestamp are not suppressed.
-  config.log_formatter = ::Logger::Formatter.new
+  config.log_formatter = Logger::Formatter.new
 
   # Use a different logger for distributed setups.
   # require 'syslog/logger'
diff --git a/spec/dummy/Gemfile.lock b/spec/dummy/Gemfile.lock
index f2990bbf01..351492cfc6 100644
--- a/spec/dummy/Gemfile.lock
+++ b/spec/dummy/Gemfile.lock
@@ -195,6 +195,8 @@ GEM
     nokogiri (1.18.10)
       mini_portile2 (~> 2.8.2)
       racc (~> 1.4)
+    nokogiri (1.18.10-x86_64-linux-gnu)
+      racc (~> 1.4)
     ostruct (0.6.3)
     package_json (0.1.0)
     parallel (1.24.0)
@@ -408,6 +410,7 @@ GEM
 
 PLATFORMS
   ruby
+  x86_64-linux
 
 DEPENDENCIES
   amazing_print
diff --git a/spec/dummy/bin/prod b/spec/dummy/bin/prod
new file mode 100755
index 0000000000..35d0d355ce
--- /dev/null
+++ b/spec/dummy/bin/prod
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# Run only after ./bin/prod-assets
+
+# Check if assets are precompiled
+MANIFEST="public/webpack/production/manifest.json"
+
+if [ ! -d "public/assets" ]; then
+  echo "ERROR: public/assets not found. Run ./bin/prod-assets first"
+  exit 1
+fi
+
+if [ ! -f "$MANIFEST" ]; then
+  echo "ERROR: $MANIFEST not found. Run ./bin/prod-assets first"
+  exit 1
+fi
+
+# Simple up-to-date check: warn if source files are newer than manifest.json
+if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then
+  echo "WARNING: client or config has changes newer than compiled assets"
+  echo "Consider running ./bin/prod-assets to rebuild"
+fi
+
+if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then
+  echo "WARNING: yarn.lock is newer than compiled assets"
+  echo "Consider running ./bin/prod-assets to rebuild"
+fi
+
+NODE_ENV=production RAILS_ENV=production bundle exec rails server -p 3001
diff --git a/spec/dummy/bin/prod-assets b/spec/dummy/bin/prod-assets
new file mode 100755
index 0000000000..cf493134fa
--- /dev/null
+++ b/spec/dummy/bin/prod-assets
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+export NODE_ENV=production
+export RAILS_ENV=production
+if [ "$CI" = "true" ]; then
+  bundle exec bootsnap precompile --gemfile app/ lib/ config/
+fi
+yarn run build:rescript
+bundle exec rails assets:precompile
diff --git a/spec/dummy/config/puma.rb b/spec/dummy/config/puma.rb
index de5feec982..01b93c7d91 100644
--- a/spec/dummy/config/puma.rb
+++ b/spec/dummy/config/puma.rb
@@ -10,10 +10,12 @@
 min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count }
 threads min_threads_count, max_threads_count
 
+rails_env = ENV.fetch("RAILS_ENV", "development")
+
 # Specifies the `worker_timeout` threshold that Puma will use to wait before
 # terminating a worker in development environments.
 #
-worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development"
+worker_timeout 3600 if rails_env == "development"
 
 # Specifies the `port` that Puma will listen on to receive requests; default is 3000.
 #
@@ -21,25 +23,32 @@
 # Specifies the `environment` that Puma will run in.
 #
-environment ENV.fetch("RAILS_ENV", "development")
+environment rails_env
 
 # Specifies the `pidfile` that Puma will use.
 pidfile ENV.fetch("PIDFILE", "tmp/pids/server.pid")
 
-# Specifies the number of `workers` to boot in clustered mode.
-# Workers are forked web server processes. If using threads and workers together
-# the concurrency of the application would be max `threads` * `workers`.
-# Workers do not work on JRuby or Windows (both of which do not support
-# processes).
-#
-# workers ENV.fetch("WEB_CONCURRENCY") { 2 }
-
-# Use the `preload_app!` method when specifying a `workers` number.
-# This directive tells Puma to first boot the application and load code
-# before forking the application. This takes advantage of Copy On Write
-# process behavior so workers use less memory.
-#
-# preload_app!
+if rails_env == "production"
+  # Specifies the number of `workers` to boot in clustered mode.
+  # Workers are forked web server processes. If using threads and workers together
+  # the concurrency of the application would be max `threads` * `workers`.
+  # Workers do not work on JRuby or Windows (both of which do not support
+  # processes).
+  #
+  workers ENV.fetch("WEB_CONCURRENCY", 2)
+
+  # Use the `preload_app!` method when specifying a `workers` number.
+  # This directive tells Puma to first boot the application and load code
+  # before forking the application. This takes advantage of Copy On Write
+  # process behavior so workers use less memory.
+  #
+  preload_app!
+
+  # Specifies the `worker_shutdown_timeout` threshold that Puma will use to wait before
+  # terminating a worker.
+  #
+  worker_shutdown_timeout 60
+end
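+
+# With the benchmark workflow defaults (WEB_CONCURRENCY=4, RAILS_MAX_THREADS=3),
+# the effective request concurrency is workers * threads = 4 * 3 = 12.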
 
 # Allow puma to be restarted by `bin/rails restart` command.
 plugin :tmp_restart
diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb
new file mode 100755
index 0000000000..ad1dc42674
--- /dev/null
+++ b/spec/performance/bench.rb
@@ -0,0 +1,407 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require "English"
+require "json"
+require "fileutils"
+require "net/http"
+require "uri"
+
+# Helper to get env var with default,
+# treating empty string and "0" as unset since they can come from the benchmark workflow.
+def env_or_default(key, default)
+  value = ENV[key].to_s
+  value.empty? || value == "0" ? default : value
+end
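+
+# For example: env_or_default("RATE", "50") returns "50" when ENV["RATE"] is
+# nil, "", or "0", and returns "100" when ENV["RATE"] == "100".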
+
+# Benchmark parameters
+PRO = ENV.fetch("PRO", "false") == "true"
+APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "spec/dummy"
+ROUTES = env_or_default("ROUTES", nil)
+BASE_URL = env_or_default("BASE_URL", "localhost:3001")
+# requests per second; if "max" will get maximum number of queries instead of a fixed rate
+RATE = env_or_default("RATE", "50")
+# concurrent connections/virtual users
+CONNECTIONS = env_or_default("CONNECTIONS", 10).to_i
+# maximum connections/virtual users
+MAX_CONNECTIONS = env_or_default("MAX_CONNECTIONS", CONNECTIONS).to_i
+# benchmark duration (duration string like "30s", "1m", "90s")
+DURATION = env_or_default("DURATION", "30s")
+# request timeout (duration string as above)
+REQUEST_TIMEOUT = env_or_default("REQUEST_TIMEOUT", "60s")
+# Tools to run (comma-separated)
+TOOLS = env_or_default("TOOLS", "fortio,vegeta,k6").split(",")
+
+OUTDIR = "bench_results"
+SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze
+
+# Validate input parameters
+def validate_rate(rate)
+  return if rate == "max"
+
+  return if rate.match?(/^\d+(\.\d+)?$/) && rate.to_f.positive?
+
+  raise "RATE must be 'max' or a positive number (got: '#{rate}')"
+end
+
+def validate_positive_integer(value, name)
+  return if value.is_a?(Integer) && value.positive?
+
+  raise "#{name} must be a positive integer (got: '#{value}')"
+end
+
+def validate_duration(value, name)
+  return if value.match?(/^(\d+(\.\d+)?[smh])+$/)
+
+  raise "#{name} must be a duration like '10s', '1m', '1.5m' (got: '#{value}')"
+end
+
+def parse_json_file(file_path, tool_name)
+  JSON.parse(File.read(file_path))
+rescue Errno::ENOENT
+  raise "#{tool_name} results file not found: #{file_path}"
+rescue JSON::ParserError => e
+  raise "Failed to parse #{tool_name} JSON: #{e.message}"
+rescue StandardError => e
+  raise "Failed to read #{tool_name} results: #{e.message}"
+end
+
+def failure_metrics(error)
+  ["FAILED", "FAILED", "FAILED", "FAILED", error.message]
+end
+
+def add_summary_line(*parts)
+  File.open(SUMMARY_TXT, "a") do |f|
+    f.puts parts.join("\t")
+  end
+end
+
+# Get routes from the Rails app filtered by pages# and react_router# controllers
+def get_benchmark_routes(app_dir)
+  routes_output = `cd #{app_dir} && bundle exec rails routes 2>&1`
+  raise "Failed to get routes from #{app_dir}" unless $CHILD_STATUS.success?
+
+  routes = []
+  routes_output.each_line do |line|
+    # Parse lines like: "server_side_hello_world GET /server_side_hello_world(.:format) pages#server_side_hello_world"
+    # We want GET routes only (not POST, etc.) served by pages# or react_router# controllers
+    # Capture path up to (.:format) part using [^(\s]+ (everything except '(' and whitespace)
+    next unless (match = line.match(/GET\s+([^(\s]+).*(pages|react_router)#/))
+
+    path = match[1]
+    path = "/" if path.empty? # Handle root route
+    routes << path
+  end
+  raise "No pages# or react_router# routes found in #{app_dir}" if routes.empty?
+
+  routes
+end
+
+# Get all routes to benchmark
+routes =
+  if ROUTES
+    ROUTES.split(",").map(&:strip).reject(&:empty?)
+  else
+    get_benchmark_routes(APP_DIR)
+  end
+
+raise "No routes to benchmark" if routes.empty?
+
+validate_rate(RATE)
+validate_positive_integer(CONNECTIONS, "CONNECTIONS")
+validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS")
+validate_duration(DURATION, "DURATION")
+validate_duration(REQUEST_TIMEOUT, "REQUEST_TIMEOUT")
+
+raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS
+
+# Check required tools are installed
+required_tools = TOOLS + %w[column tee]
+required_tools.each do |cmd|
+  raise "required tool '#{cmd}' is not installed" unless system("command -v #{cmd} >/dev/null 2>&1")
+end
+
+puts <<~PARAMS
+  Benchmark parameters:
+  - APP_DIR: #{APP_DIR}
+  - ROUTES: #{ROUTES || 'auto-detect from Rails'}
+  - BASE_URL: #{BASE_URL}
+  - RATE: #{RATE}
+  - DURATION: #{DURATION}
+  - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT}
+  - CONNECTIONS: #{CONNECTIONS}
+  - MAX_CONNECTIONS: #{MAX_CONNECTIONS}
+  - WEB_CONCURRENCY: #{ENV['WEB_CONCURRENCY'] || 'unset'}
+  - RAILS_MAX_THREADS: #{ENV['RAILS_MAX_THREADS'] || 'unset'}
+  - RAILS_MIN_THREADS: #{ENV['RAILS_MIN_THREADS'] || 'unset'}
+  - TOOLS: #{TOOLS.join(', ')}
+PARAMS
+
+# Helper method to check if server is responding
+def server_responding?(uri)
+  response = Net::HTTP.get_response(uri)
+  # Accept both success (2xx) and redirect (3xx) responses as "server is responding"
+  success = response.is_a?(Net::HTTPSuccess) || response.is_a?(Net::HTTPRedirection)
+  info = "HTTP #{response.code} #{response.message}"
+  info += " -> #{response['location']}" if response.is_a?(Net::HTTPRedirection) && response["location"]
+  { success: success, info: info }
+rescue StandardError => e
+  { success: false, info: "#{e.class.name}: #{e.message}" }
+end
+
+# Wait for the server to be ready
+TIMEOUT_SEC = 60
+puts "Checking server availability at #{BASE_URL}..."
+test_uri = URI.parse("http://#{BASE_URL}#{routes.first}")
+start_time = Time.now
+attempt_count = 0
+loop do
+  attempt_count += 1
+  attempt_start = Time.now
+  result = server_responding?(test_uri)
+  attempt_duration = Time.now - attempt_start
+  elapsed = Time.now - start_time
+
+  # rubocop:disable Layout/LineLength
+  if result[:success]
+    puts "  ✅ Attempt #{attempt_count} at #{elapsed.round(2)}s: SUCCESS - #{result[:info]} (took #{attempt_duration.round(3)}s)"
+    break
+  else
+    puts "  ❌ Attempt #{attempt_count} at #{elapsed.round(2)}s: FAILED - #{result[:info]} (took #{attempt_duration.round(3)}s)"
+  end
+  # rubocop:enable Layout/LineLength
+
+  raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if elapsed > TIMEOUT_SEC
+
+  sleep 1
+end
+puts "Server is ready!"
+
+FileUtils.mkdir_p(OUTDIR)
+
+# Validate RATE=max constraint
+IS_MAX_RATE = RATE == "max"
+if IS_MAX_RATE && CONNECTIONS != MAX_CONNECTIONS
+  raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})"
+end
+
+# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength
+
+# Benchmark a single route with Fortio
+def run_fortio_benchmark(target, route_name)
+  return nil unless TOOLS.include?("fortio")
+
+  begin
+    puts "===> Fortio: #{route_name}"
+
+    fortio_json = "#{OUTDIR}/#{route_name}_fortio.json"
+    fortio_txt = "#{OUTDIR}/#{route_name}_fortio.txt"
+
+    # Configure Fortio arguments
+    # See https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass
+    fortio_args =
+      if IS_MAX_RATE
+        ["-qps", 0, "-c", CONNECTIONS]
+      else
+        ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS]
+      end
+
+    fortio_cmd = [
+      "fortio", "load",
+      *fortio_args,
+      "-t", DURATION,
+      "-timeout", REQUEST_TIMEOUT,
+      # Allow redirects. Could use -L instead, but it uses the slower HTTP client.
+      "-allow-initial-errors",
+      "-json", fortio_json,
+      target
+    ].join(" ")
+    raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{fortio_txt}")
+
+    fortio_data = parse_json_file(fortio_json, "Fortio")
+    fortio_rps = fortio_data["ActualQPS"]&.round(2) || "missing"
+
+    percentiles = fortio_data.dig("DurationHistogram", "Percentiles") || []
+    p50_data = percentiles.find { |p| p["Percentile"] == 50 }
+    p90_data = percentiles.find { |p| p["Percentile"] == 90 }
+    p99_data = percentiles.find { |p| p["Percentile"] == 99 }
+
+    raise "Fortio results missing percentile data" unless p50_data && p90_data && p99_data
+
+    fortio_p50 = (p50_data["Value"] * 1000).round(2)
+    fortio_p90 = (p90_data["Value"] * 1000).round(2)
+    fortio_p99 = (p99_data["Value"] * 1000).round(2)
+    fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing"
+
+    [fortio_rps, fortio_p50, fortio_p90, fortio_p99, fortio_status]
+  rescue StandardError => error
+    puts "Error: #{error.message}"
+    failure_metrics(error)
+  end
+end
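+
+# The latency conversions differ per tool: Fortio reports seconds (multiplied
+# by 1000 above), Vegeta reports nanoseconds (divided by 1_000_000.0 below),
+# and k6 reports milliseconds directly, so all summary columns end up in ms.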
+
+# Benchmark a single route with Vegeta
+def run_vegeta_benchmark(target, route_name)
+  return nil unless TOOLS.include?("vegeta")
+
+  begin
+    puts "\n===> Vegeta: #{route_name}"
+
+    vegeta_bin = "#{OUTDIR}/#{route_name}_vegeta.bin"
+    vegeta_json = "#{OUTDIR}/#{route_name}_vegeta.json"
+    vegeta_txt = "#{OUTDIR}/#{route_name}_vegeta.txt"
+
+    # Configure Vegeta arguments
+    vegeta_args =
+      if IS_MAX_RATE
+        ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"]
+      else
+        ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"]
+      end
+
+    vegeta_cmd = [
+      "echo 'GET #{target}' |",
+      "vegeta", "attack",
+      *vegeta_args,
+      "-duration=#{DURATION}",
+      "-timeout=#{REQUEST_TIMEOUT}"
+    ].join(" ")
+    raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{vegeta_bin} | vegeta report | tee #{vegeta_txt}")
+    raise "Vegeta report generation failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}")
+
+    vegeta_data = parse_json_file(vegeta_json, "Vegeta")
+    vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing"
+    vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing"
+    vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing"
+    vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing"
+    vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing"
+
+    [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status]
+  rescue StandardError => error
+    puts "Error: #{error.message}"
+    failure_metrics(error)
+  end
+end
+
+# Benchmark a single route with k6
+def run_k6_benchmark(target, route_name)
+  return nil unless TOOLS.include?("k6")
+
+  begin
+    puts "\n===> k6: #{route_name}"
+
+    k6_script_file = "#{OUTDIR}/#{route_name}_k6_test.js"
+    k6_summary_json = "#{OUTDIR}/#{route_name}_k6_summary.json"
+    k6_txt = "#{OUTDIR}/#{route_name}_k6.txt"
+
+    # Configure k6 scenarios
+    k6_scenarios =
+      if IS_MAX_RATE
+        <<~JS.strip
+          {
+            max_rate: {
+              executor: 'constant-vus',
+              vus: #{CONNECTIONS},
+              duration: '#{DURATION}'
+            }
+          }
+        JS
+      else
+        <<~JS.strip
+          {
+            constant_rate: {
+              executor: 'constant-arrival-rate',
+              rate: #{RATE},
+              timeUnit: '1s',
+              duration: '#{DURATION}',
+              preAllocatedVUs: #{CONNECTIONS},
+              maxVUs: #{MAX_CONNECTIONS}
+            }
+          }
+        JS
+      end
+
+    k6_script = <<~JS
+      import http from 'k6/http';
+      import { check } from 'k6';
+
+      export const options = {
+        scenarios: #{k6_scenarios},
+      };
+
+      export default function () {
+        const response = http.get('#{target}', { timeout: '#{REQUEST_TIMEOUT}' });
+        check(response, {
+          'status=200': r => r.status === 200,
+        });
+      }
+    JS
+    File.write(k6_script_file, k6_script)
+    k6_command = "k6 run --summary-export=#{k6_summary_json} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'"
+    raise "k6 benchmark failed" unless system("#{k6_command} #{k6_script_file} | tee #{k6_txt}")
+
+    k6_data = parse_json_file(k6_summary_json, "k6")
+    k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing"
+    k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing"
+    k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing"
+    k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing"
+
+    # Status: compute successful vs failed requests
+    k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0
+    k6_checks = k6_data.dig("root_group", "checks") || {}
+    k6_status_parts = k6_checks.map do |name, check|
+      status_label = name.start_with?("status=") ? name.delete_prefix("status=") : name
+      "#{status_label}=#{check['passes']}"
+    end
+    k6_reqs_known_status = k6_checks.values.sum { |check| check["passes"] || 0 }
+    k6_reqs_other = k6_reqs_total - k6_reqs_known_status
+    k6_status_parts << "other=#{k6_reqs_other}" if k6_reqs_other.positive?
+    k6_status = k6_status_parts.empty? ? "missing" : k6_status_parts.join(",")
+
+    [k6_rps, k6_p50, k6_p90, k6_p99, k6_status]
+  rescue StandardError => error
+    puts "Error: #{error.message}"
+    failure_metrics(error)
+  end
+end
+
+# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength
+
+# Initialize summary file
+File.write(SUMMARY_TXT, "")
+add_summary_line("Route", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status")
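+
+# summary.txt is a tab-separated table (one header row, then one row per
+# route/tool pair); it is pretty-printed at the end via `column -t -s $'\t'`.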
+ 10.times do + server_responding?(target) + sleep 0.5 + end + puts "Warm-up complete for #{route}" + + # Sanitize route name for filenames + route_name = route.gsub(%r{^/}, "").tr("/", "_") + route_name = "root" if route_name.empty? + + # Run each benchmark tool + fortio_metrics = run_fortio_benchmark(target, route_name) + add_summary_line(route, "Fortio", *fortio_metrics) if fortio_metrics + + vegeta_metrics = run_vegeta_benchmark(target, route_name) + add_summary_line(route, "Vegeta", *vegeta_metrics) if vegeta_metrics + + k6_metrics = run_k6_benchmark(target, route_name) + add_summary_line(route, "k6", *k6_metrics) if k6_metrics +end + +puts "\nSummary saved to #{SUMMARY_TXT}" +system("column", "-t", "-s", "\t", SUMMARY_TXT)