chore: cleanup internal docs and local config files #110
name: CI

on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

env:
  CARGO_TERM_COLOR: always
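
# The build job fans out over Linux, Windows, and macOS on stable Rust.
# Each leg installs Ollama, warms up llama3.2, runs the Ollama-dependent
# tests first, then the rest of the suite.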
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        rust: [stable]
    steps:
      - uses: actions/checkout@v4
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          components: clippy, rustfmt
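      # Cache cargo's registry, git DB, and build artifacts, keyed on Cargo.lock.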
      - name: Cache cargo
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
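      # The three OS-specific steps below follow the same pattern: install
      # Ollama, start the server, poll the API until it answers, pull
      # llama3.2, then warm it up so the first test call doesn't time out.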
      - name: Install Ollama (Linux)
        if: runner.os == 'Linux'
        run: |
          # Install Ollama with retry logic for transient 503 errors
          install_ollama() {
            local max_retries=5
            local retry=0
            local delay=5
            while [ $retry -lt $max_retries ]; do
              echo "Attempting Ollama installation (attempt $((retry + 1))/$max_retries)..."
              if curl -fsSL --retry 3 --retry-delay 5 https://ollama.com/install.sh | sh; then
                echo "Ollama installation successful!"
                return 0
              fi
              retry=$((retry + 1))
              if [ $retry -lt $max_retries ]; then
                echo "Installation failed, retrying in ${delay}s..."
                sleep $delay
                delay=$((delay * 2)) # Exponential backoff
              fi
            done
            echo "ERROR: Ollama installation failed after $max_retries attempts"
            return 1
          }
          install_ollama
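          # Keep the model resident between steps; Ollama otherwise unloads
          # idle models after a few minutes.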
          OLLAMA_KEEP_ALIVE=60m ollama serve &
          # Wait for server to be ready
          for i in {1..30}; do
            if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
              echo "Ollama server is ready"
              break
            fi
            echo "Waiting for Ollama server... ($i/30)"
            sleep 2
          done
          ollama pull llama3.2
          # Warm up the model by loading it into memory - MUST complete before tests
          echo "Warming up llama3.2 model (this loads it into memory)..."
          curl -s --max-time 300 http://localhost:11434/api/generate -d '{"model": "llama3.2", "prompt": "Say hello", "stream": false}'
          echo ""
          echo "Model warm-up complete - model is now loaded"
      - name: Install Ollama (macOS)
        if: runner.os == 'macOS'
        run: |
          brew install ollama
          OLLAMA_KEEP_ALIVE=60m ollama serve &
          # Wait for server to be ready
          for i in {1..30}; do
            if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
              echo "Ollama server is ready"
              break
            fi
            echo "Waiting for Ollama server... ($i/30)"
            sleep 2
          done
          echo "Pulling llama3.2 model..."
          ollama pull llama3.2
          echo "Model pull complete. Checking available models:"
          ollama list
          # Warm up the model by loading it into memory - MUST complete before tests
          echo "Warming up llama3.2 model (this loads it into memory)..."
          echo "Start time: $(date)"
          curl -v --max-time 600 http://localhost:11434/api/generate -d '{"model": "llama3.2", "prompt": "Say hello", "stream": false}' 2>&1 || {
            echo "Warm-up curl failed with exit code $?"
            echo "Checking if Ollama is still running..."
            curl -s http://localhost:11434/api/tags || echo "Server not responding"
            exit 1
          }
          echo ""
          echo "End time: $(date)"
          echo "Model warm-up complete - model is now loaded"
      - name: Install Ollama (Windows)
        if: runner.os == 'Windows'
        run: |
          # Download the Ollama release zip directly (smaller than the full installer)
          $ollamaUrl = "https://github.com/ollama/ollama/releases/download/v0.13.2/ollama-windows-amd64.zip"
          Write-Host "Downloading Ollama from $ollamaUrl"
          Invoke-WebRequest -Uri $ollamaUrl -OutFile "ollama.zip" -TimeoutSec 600
          # Extract
          Expand-Archive -Path "ollama.zip" -DestinationPath "." -Force
          # Verify ollama.exe exists
          if (Test-Path ".\ollama.exe") {
            Write-Host "ollama.exe found"
          } else {
            Write-Host "ERROR: ollama.exe not found after extraction"
            Get-ChildItem -Recurse | Select-Object FullName
            exit 1
          }
          # Start Ollama server in background with keep-alive to prevent model unloading
          $env:OLLAMA_KEEP_ALIVE = "60m"
          Start-Process -FilePath ".\ollama.exe" -ArgumentList "serve" -WindowStyle Hidden
          # Wait for server to be ready
          $maxRetries = 30
          $retry = 0
          while ($retry -lt $maxRetries) {
            Start-Sleep -Seconds 2
            try {
              $response = Invoke-WebRequest -Uri "http://localhost:11434/api/tags" -TimeoutSec 5 -ErrorAction SilentlyContinue
              if ($response.StatusCode -eq 200) {
                Write-Host "Ollama server is ready"
                break
              }
            } catch {
              Write-Host "Waiting for Ollama server... ($retry/$maxRetries)"
            }
            $retry++
          }
          if ($retry -eq $maxRetries) {
            Write-Host "ERROR: Ollama server failed to start"
            exit 1
          }
          # Pull the model
          .\ollama.exe pull llama3.2
          # Warm up the model by loading it into memory - MUST complete before tests
          Write-Host "Warming up llama3.2 model (this loads it into memory)..."
          $body = '{"model": "llama3.2", "prompt": "Say hello", "stream": false}'
          $response = Invoke-WebRequest -Uri "http://localhost:11434/api/generate" -Method POST -Body $body -ContentType "application/json" -TimeoutSec 300
          Write-Host "Model warm-up complete - model is now loaded"
          Write-Host "Response: $($response.Content)"
        shell: pwsh
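      # Static checks run before the test steps so cheap failures surface early.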
      - name: Check formatting
        run: cargo fmt -- --check
      - name: Clippy
        run: cargo clippy -- -D warnings
      - name: Build
        run: cargo build --verbose
      - name: Build tests (pre-compile before Ollama warm-up)
        run: cargo test --no-run --verbose
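      # Tests are compiled ahead of time so the model doesn't sit idle (and
      # risk being unloaded) while cargo builds the test binaries.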
      - name: Re-warm and run Ollama tests FIRST (Linux/macOS)
        if: runner.os != 'Windows'
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          echo "Checking Ollama server health..."
          if ! curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
            echo "ERROR: Ollama server is not responding!"
            echo "Attempting to restart Ollama..."
            OLLAMA_KEEP_ALIVE=60m ollama serve &
            sleep 5
          fi
          echo "Available models:"
          curl -s http://localhost:11434/api/tags
          echo ""
          echo "Re-warming llama3.2 model..."
          curl -s --max-time 300 http://localhost:11434/api/generate -d '{"model": "llama3.2", "prompt": "Ready", "stream": false}' || {
            echo "Warm-up failed, checking server status..."
            curl -s http://localhost:11434/api/tags || echo "Server not responding"
            exit 1
          }
          echo ""
          echo "Model loaded - running Ollama tests IMMEDIATELY while model is in memory..."
          # Run both Ollama-specific tests that need the model loaded
          cargo test --test ai_integration -- test_ollama_explain_finding --test-threads=1
          cargo test --test ai_integration -- test_all_providers_explain_same_finding --test-threads=1
          echo "Ollama tests passed!"
      - name: Re-warm and run Ollama tests FIRST (Windows)
        if: runner.os == 'Windows'
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          Write-Host "Checking Ollama server health..."
          try {
            $response = Invoke-WebRequest -Uri "http://localhost:11434/api/tags" -TimeoutSec 5 -ErrorAction Stop
            Write-Host "Server is healthy"
          } catch {
            Write-Host "WARNING: Ollama server not responding, attempting restart..."
            $env:OLLAMA_KEEP_ALIVE = "60m"
            Start-Process -FilePath ".\ollama.exe" -ArgumentList "serve" -WindowStyle Hidden
            Start-Sleep -Seconds 5
          }
          Write-Host "Re-warming llama3.2 model..."
          $body = '{"model": "llama3.2", "prompt": "Ready", "stream": false}'
          Invoke-WebRequest -Uri "http://localhost:11434/api/generate" -Method POST -Body $body -ContentType "application/json" -TimeoutSec 300
          Write-Host "Model loaded - running Ollama tests IMMEDIATELY while model is in memory..."
          # Run both Ollama-specific tests that need the model loaded
          cargo test --test ai_integration -- test_ollama_explain_finding --test-threads=1
          cargo test --test ai_integration -- test_all_providers_explain_same_finding --test-threads=1
          Write-Host "Ollama tests passed!"
        shell: pwsh
      - name: Run remaining tests
        run: cargo test --verbose -- --skip test_ollama_explain_finding --skip test_all_providers_explain_same_finding
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
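
  # Release job: builds the mcplint binary and attaches it to a GitHub
  # release whenever a tag is pushed.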
  release:
    needs: build
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/')
    steps:
      - uses: actions/checkout@v4
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Build release
        run: cargo build --release
      - name: Create Release
        uses: softprops/action-gh-release@v1
        with:
          files: target/release/mcplint
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
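
The warm-up sequence above can be reproduced locally when debugging the
Ollama tests. A minimal sketch, assuming Ollama and the llama3.2 model
are already installed and the repository is checked out:

    # Start the server with a long keep-alive, wait for the API, warm the
    # model, then run the Ollama-dependent tests single-threaded, as CI does.
    OLLAMA_KEEP_ALIVE=60m ollama serve &
    until curl -s http://localhost:11434/api/tags > /dev/null 2>&1; do sleep 2; done
    curl -s --max-time 300 http://localhost:11434/api/generate \
      -d '{"model": "llama3.2", "prompt": "Ready", "stream": false}'
    cargo test --test ai_integration -- test_ollama_explain_finding --test-threads=1
    cargo test --test ai_integration -- test_all_providers_explain_same_finding --test-threads=1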