# wip: prebaked platform VM (#2102)
---
# Integration tests for agentstack-server, run inside QEMU/KVM-backed VMs.
# Triggered manually, on PRs touching the server/CLI apps, and on pushes to main.
name: integration-test

permissions:
  contents: read

# One run per workflow+ref; newer pushes cancel in-flight runs to save CI minutes.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  workflow_dispatch: {}
  pull_request:
    paths:
      - 'apps/agentstack-server/**'
      - 'apps/agentstack-cli/**'
  push:
    branches:
      - main
    paths:
      - 'apps/agentstack-server/**'
      - 'apps/agentstack-cli/**'

jobs:
  integration-test:
    timeout-minutes: 25
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
      - name: Maximize build space
        uses: ./.github/actions/maximize-build-space
        with:
          root-reserve-mb: 15360
          temp-reserve-mb: 2048
          swap-size-mb: 1024
          remove-dotnet: 'true'
      - name: "Install QEMU"
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qqy --no-install-recommends qemu-system-x86 qemu-utils ovmf
          # Best-effort: the runner kernel usually ships kvm, and /dev/kvm may
          # already be accessible — don't fail the job if either step is a no-op.
          sudo modprobe kvm || true
          sudo chown $(whoami) /dev/kvm || true
      - name: "Cache ~/.cache/lima"
        uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5
        with:
          path: ~/.cache/lima
          key: lima-v0-x86_64-${{ hashFiles('mise.lock') }}
      # NOTE(review): second checkout — presumably required because
      # maximize-build-space remounts the workspace and drops the first
      # checkout's contents; confirm before removing.
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
      - uses: ./.github/actions/setup
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - run: mise run agentstack-server:test:integration
        env:
          LLM_API_BASE: "${{ secrets.OPENAI_API_BASE }}"
          LLM_MODEL: "${{ vars.OPENAI_MODEL }}"
          LLM_API_KEY: "${{ secrets.OPENAI_API_KEY }}"
          # Alternative provider, kept for quick switching:
          # LLM_API_BASE: "https://api.groq.com/openai/v1"
          # LLM_MODEL: "groq:meta-llama/llama-4-maverick-17b-128e-instruct"
          # LLM_API_KEY: "${{ secrets.GROQ_API_KEY }}"
      # Dump VM serial consoles only when the test step failed, to aid debugging.
      - if: failure()
        run: |
          echo "=== platform VM serial logs ===" && find /home/runner/.agentstack/lima -name 'serial*.log' 2>/dev/null | xargs -r cat
          echo "=== builder VM serial logs ===" && find /home/runner/.lima -name 'serial*.log' 2>/dev/null | xargs -r cat
      # Trim the uv cache so only used entries are persisted by the cache action.
      - run: uv cache prune --ci