diff --git a/.github/instructions/cicd.instructions.md b/.github/instructions/cicd.instructions.md
index 31a425822..5ee4624c9 100644
--- a/.github/instructions/cicd.instructions.md
+++ b/.github/instructions/cicd.instructions.md
@@ -9,7 +9,8 @@ description: "CI/CD Pipeline configuration for PyInstaller binary packaging and
Three workflows split by trigger and secret requirements:
1. **`ci.yml`** — `pull_request` trigger (all PRs, including forks)
- - **Linux-only** (ubuntu-24.04). Unit tests + single binary build. No secrets needed. Fast PR feedback (~3 min).
+ - **Linux + Windows** (ubuntu-24.04, windows-latest). Unit tests in parallel on both platforms + single Linux binary build. No secrets needed.
+ - Windows job catches path-separator, encoding, and other platform-specific issues before merge.
- Uploads Linux x86_64 binary artifact for downstream integration testing.
2. **`ci-integration.yml`** — `workflow_run` trigger (after CI completes, environment-gated)
- **Linux-only**. Smoke tests, integration tests, release validation. Requires `integration-tests` environment approval.
@@ -21,9 +22,9 @@ Three workflows split by trigger and secret requirements:
- macOS builds and cross-platform validation happen here, where queue time doesn't block PRs.
## Platform Testing Strategy
-- **PR time**: Linux-only for speed. Catches logic bugs, dependency issues, and binary packaging problems.
-- **Post-merge**: Full 4-platform matrix catches platform-specific issues immediately on main.
-- **Rationale**: PR-time Linux coverage gives fast feedback on logic, dependency, and packaging changes, while the post-merge full-matrix workflows quickly catch any remaining platform-specific issues.
+- **PR time**: Linux + Windows in parallel. Catches logic bugs, dependency issues, path-separator and encoding problems, and other Windows-specific issues before merge (see the sketch below).
+- **Post-merge**: Full 5-platform matrix (linux x86_64/arm64, darwin x86_64/arm64, windows x86_64) catches remaining platform-specific issues on main.
+- **Rationale**: Linux + Windows PR coverage spans the two fundamentally different platform families (Unix vs. Windows); macOS-specific issues are rare and are caught post-merge.
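+
+A sketch of the PR-time test matrix (illustrative only; `ci.yml` is the authoritative definition):
+
+```yaml
+# Unit tests fan out to both platform families on every PR
+strategy:
+  matrix:
+    os: [ubuntu-24.04, windows-latest]
+runs-on: ${{ matrix.os }}
+```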
## PyInstaller Binary Packaging
- **CRITICAL**: Uses `--onedir` mode (NOT `--onefile`) for faster CLI startup performance
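+- Build invocation sketch (the full logic lives in `scripts/build-binary.sh` and `scripts/windows/build-binary.ps1`):
+
+  ```bash
+  uv run pyinstaller build/apm.spec --noconfirm  # the spec file selects the --onedir layout
+  ```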
diff --git a/.github/workflows/build-release.yml b/.github/workflows/build-release.yml
index b243e3c31..8010408c7 100644
--- a/.github/workflows/build-release.yml
+++ b/.github/workflows/build-release.yml
@@ -48,6 +48,9 @@ jobs:
- os: macos-latest
arch: arm64
platform: darwin
+ - os: windows-latest
+ arch: x86_64
+ platform: windows
steps:
- uses: actions/checkout@v4
@@ -70,10 +73,18 @@ jobs:
# Wait for installation to complete
until xcode-select -p >/dev/null 2>&1; do sleep 5; done
- - name: Install uv
+ - name: Install uv (Unix)
+ if: matrix.platform != 'windows'
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
+
+ - name: Install uv (Windows)
+ if: matrix.platform == 'windows'
+ shell: pwsh
+ run: |
+ irm https://astral.sh/uv/install.ps1 | iex
+ echo "$env:USERPROFILE\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Cache uv environments
uses: actions/cache@v3
@@ -81,6 +92,7 @@ jobs:
path: |
~/.cache/uv
~/.local/share/uv
+ ~\AppData\Local\uv\cache
key: ${{ runner.os }}-uv-${{ hashFiles('**/pyproject.toml') }}
restore-keys: |
${{ runner.os }}-uv-
@@ -120,6 +132,10 @@ jobs:
platform: darwin
arch: arm64
binary_name: apm-darwin-arm64
+ - os: windows-latest
+ platform: windows
+ arch: x86_64
+ binary_name: apm-windows-x86_64
runs-on: ${{ matrix.os }}
permissions:
@@ -149,19 +165,34 @@ jobs:
until xcode-select -p >/dev/null 2>&1; do sleep 5; done
brew install upx
- - name: Install uv
+ - name: Install uv (Unix)
+ if: matrix.platform != 'windows'
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
+
+ - name: Install uv (Windows)
+ if: matrix.platform == 'windows'
+ shell: pwsh
+ run: |
+ irm https://astral.sh/uv/install.ps1 | iex
+ echo "$env:USERPROFILE\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install Python dependencies
run: |
uv sync --extra dev --extra build
- - name: Build binary
+ - name: Build binary (Unix)
+ if: matrix.platform != 'windows'
run: |
chmod +x scripts/build-binary.sh
uv run ./scripts/build-binary.sh
+
+ - name: Build binary (Windows)
+ if: matrix.platform == 'windows'
+ shell: pwsh
+ run: |
+ uv run pwsh scripts/windows/build-binary.ps1
- name: Upload binary as workflow artifact
uses: actions/upload-artifact@v4
@@ -171,7 +202,11 @@ jobs:
./dist/${{ matrix.binary_name }}
./dist/${{ matrix.binary_name }}.sha256
./scripts/test-release-validation.sh
+ ./scripts/windows/test-release-validation.ps1
+ ./scripts/test-dependency-integration.sh
+ ./scripts/windows/test-dependency-integration.ps1
./scripts/github-token-helper.sh
+ ./scripts/windows/github-token-helper.ps1
include-hidden-files: true # Required to include .apm directories
retention-days: 30
if-no-files-found: error
@@ -202,6 +237,10 @@ jobs:
arch: arm64
platform: darwin
binary_name: apm-darwin-arm64
+ - os: windows-latest
+ arch: x86_64
+ platform: windows
+ binary_name: apm-windows-x86_64
runs-on: ${{ matrix.os }}
permissions:
@@ -236,15 +275,24 @@ jobs:
# Wait for installation to complete
until xcode-select -p >/dev/null 2>&1; do sleep 5; done
- - name: Install uv
+ - name: Install uv (Unix)
+ if: matrix.platform != 'windows'
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
+
+ - name: Install uv (Windows)
+ if: matrix.platform == 'windows'
+ shell: pwsh
+ run: |
+ irm https://astral.sh/uv/install.ps1 | iex
+ echo "$env:USERPROFILE\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install test dependencies
run: uv sync --extra dev
- - name: Run integration tests
+ - name: Run integration tests (Unix)
+ if: matrix.platform != 'windows'
env:
APM_E2E_TESTS: "1"
GITHUB_TOKEN: ${{ secrets.GH_MODELS_PAT }} # Models access
@@ -255,6 +303,18 @@ jobs:
uv run ./scripts/test-integration.sh
timeout-minutes: 20
+ - name: Run integration tests (Windows)
+ if: matrix.platform == 'windows'
+ shell: pwsh
+ env:
+ APM_E2E_TESTS: "1"
+ GITHUB_TOKEN: ${{ secrets.GH_MODELS_PAT }}
+ GITHUB_APM_PAT: ${{ secrets.GH_CLI_PAT }}
+ ADO_APM_PAT: ${{ secrets.ADO_APM_PAT }}
+ run: |
+ uv run pwsh scripts/windows/test-integration.ps1 -SkipBuild
+ timeout-minutes: 20
+
# Release validation tests - Final pre-release validation of shipped binary
release-validation:
name: Release Validation
@@ -279,6 +339,10 @@ jobs:
arch: arm64
platform: darwin
binary_name: apm-darwin-arm64
+ - os: windows-latest
+ arch: x86_64
+ platform: windows
+ binary_name: apm-windows-x86_64
runs-on: ${{ matrix.os }}
permissions:
@@ -308,9 +372,10 @@ jobs:
uses: actions/download-artifact@v4
with:
name: ${{ matrix.binary_name }}
- path: /tmp/apm-isolated-test/
+ path: ${{ matrix.platform == 'windows' && 'D:\apm-isolated-test' || '/tmp/apm-isolated-test/' }}
- - name: Make binary executable and verify isolation
+ - name: Make binary executable and verify isolation (Unix)
+ if: matrix.platform != 'windows'
run: |
cd /tmp/apm-isolated-test
@@ -322,24 +387,54 @@ jobs:
# Make the binary executable
chmod +x ./dist/${{ matrix.binary_name }}/apm
- - name: Create APM symlink for testing
+ - name: Create APM symlink for testing (Unix)
+ if: matrix.platform != 'windows'
run: |
cd /tmp/apm-isolated-test
ln -s "$(pwd)/dist/${{ matrix.binary_name }}/apm" "$(pwd)/apm"
echo "/tmp/apm-isolated-test" >> $GITHUB_PATH
+
+ - name: Verify binary and add to PATH (Windows)
+ if: matrix.platform == 'windows'
+ shell: pwsh
+ run: |
+ cd D:\apm-isolated-test
+
+ # Debug: List the downloaded structure
+ Write-Host "Downloaded structure:"
+ Get-ChildItem -Recurse -Filter "apm.exe"
+ Get-ChildItem .\dist\
+
+ # Add binary directory to PATH
+ $binDir = "D:\apm-isolated-test\dist\${{ matrix.binary_name }}"
+ echo $binDir | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- - name: Run release validation tests
+ - name: Run release validation tests (Unix)
+ if: matrix.platform != 'windows'
env:
- APM_E2E_TESTS: "1" # Avoids interactive prompts for MCP env values with apm install
+ APM_E2E_TESTS: "1"
GITHUB_TOKEN: ${{ secrets.GH_MODELS_PAT }}
- GITHUB_APM_PAT: ${{ secrets.GH_CLI_PAT }} # Primary: APM module access
- ADO_APM_PAT: ${{ secrets.ADO_APM_PAT }} # Azure DevOps module access
+ GITHUB_APM_PAT: ${{ secrets.GH_CLI_PAT }}
+ ADO_APM_PAT: ${{ secrets.ADO_APM_PAT }}
run: |
cd /tmp/apm-isolated-test
chmod +x scripts/test-release-validation.sh
./scripts/test-release-validation.sh
timeout-minutes: 20
+ - name: Run release validation tests (Windows)
+ if: matrix.platform == 'windows'
+ shell: pwsh
+ env:
+ APM_E2E_TESTS: "1"
+ GITHUB_TOKEN: ${{ secrets.GH_MODELS_PAT }}
+ GITHUB_APM_PAT: ${{ secrets.GH_CLI_PAT }}
+ ADO_APM_PAT: ${{ secrets.ADO_APM_PAT }}
+ run: |
+ cd D:\apm-isolated-test
+ .\scripts\windows\test-release-validation.ps1
+ timeout-minutes: 20
+
create-release:
name: Create GitHub Release
@@ -395,6 +490,32 @@ jobs:
exit 1
fi
done
+
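+ # Package the Windows binary directory as a zip (the Unix binaries above ship as tar.gz)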
+ binary="apm-windows-x86_64"
+ artifact_dir="${binary}"
+ binary_dir="${artifact_dir}/dist/${binary}"
+ if [ -d "$binary_dir" ] && [ -f "$binary_dir/apm.exe" ]; then
+ echo "Processing $binary_dir directory..."
+ (
+ cd "${artifact_dir}/dist"
+ zip -qr "../../${binary}.zip" "${binary}"
+ )
+ if command -v sha256sum &> /dev/null; then
+ sha256sum "${binary}.zip" > "${binary}.zip.sha256"
+ elif command -v shasum &> /dev/null; then
+ shasum -a 256 "${binary}.zip" > "${binary}.zip.sha256"
+ fi
+ echo "Created ${binary}.zip"
+ else
+ echo "ERROR: Binary directory $binary_dir not found or $binary_dir/apm.exe missing"
+ echo "Artifact directory contents:"
+ ls -la "$artifact_dir/" || echo "Directory $artifact_dir does not exist"
+ if [ -d "$artifact_dir/dist" ]; then
+ echo "Dist directory contents:"
+ ls -la "$artifact_dir/dist/"
+ fi
+ exit 1
+ fi
- name: Determine release type
id: release_type
@@ -430,6 +551,8 @@ jobs:
./dist/apm-darwin-x86_64.tar.gz.sha256
./dist/apm-darwin-arm64.tar.gz
./dist/apm-darwin-arm64.tar.gz.sha256
+ ./dist/apm-windows-x86_64.zip
+ ./dist/apm-windows-x86_64.zip.sha256
# Publish to PyPI (only stable releases from public repo)
publish-pypi:
@@ -533,3 +656,42 @@ jobs:
"linux_arm64": "${{ steps.checksums.outputs.linux-arm64-sha }}"
}
}
+
+ # Update Scoop bucket (only stable releases from public repo)
+ update-scoop:
+ name: Update Scoop Bucket
+ runs-on: ubuntu-latest
+ needs: [test, build, integration-tests, release-validation, create-release, publish-pypi]
+ # TODO: Enable once downstream repository and secrets are configured (see #88)
+ if: false && github.ref_type == 'tag' && needs.create-release.outputs.is_private_repo != 'true' && needs.create-release.outputs.is_prerelease != 'true'
+ permissions:
+ contents: read
+
+ steps:
+ - name: Extract Windows checksum from GitHub release
+ id: checksums
+ run: |
+ RELEASE_TAG="${{ github.ref_name }}"
+ curl -L -o apm-windows-x86_64.zip.sha256 \
+ "https://github.com/${{ github.repository }}/releases/download/$RELEASE_TAG/apm-windows-x86_64.zip.sha256"
+ WINDOWS_X86_64_SHA=$(cut -d' ' -f1 apm-windows-x86_64.zip.sha256)
+ echo "windows-x86_64-sha=$WINDOWS_X86_64_SHA" >> $GITHUB_OUTPUT
+ echo "Windows x86_64 SHA: $WINDOWS_X86_64_SHA"
+
+ - name: Trigger Scoop bucket repository update
+ uses: peter-evans/repository-dispatch@v3
+ with:
+ token: ${{ secrets.GH_PKG_PAT }}
+ repository: microsoft/scoop-apm
+ event-type: bucket-update
+ client-payload: |
+ {
+ "release": {
+ "version": "${{ github.ref_name }}",
+ "tag": "${{ github.ref_name }}",
+ "repository": "${{ github.repository }}"
+ },
+ "checksums": {
+ "windows_x86_64": "${{ steps.checksums.outputs.windows-x86_64-sha }}"
+ }
+ }
diff --git a/.github/workflows/ci-integration.yml b/.github/workflows/ci-integration.yml
index a76186f35..592bbd41d 100644
--- a/.github/workflows/ci-integration.yml
+++ b/.github/workflows/ci-integration.yml
@@ -44,7 +44,7 @@ jobs:
steps:
- run: echo "Internal PR auto-approved for ${{ github.event.workflow_run.head_branch }}"
- # Linux-only for fast PR feedback. Full platform smoke tests run post-merge.
+ # Linux smoke test
smoke-test:
needs: [approve-fork, approve-internal]
# Run if either approval job succeeded (the other will be skipped)
@@ -92,9 +92,9 @@ jobs:
GITHUB_APM_PAT: ${{ secrets.GH_CLI_PAT }}
run: uv run pytest tests/integration/test_runtime_smoke.py -v
- # Linux-only — downloads the single Linux binary artifact from ci.yml.
+ # Linux integration tests — downloads the Linux binary artifact from ci.yml.
integration-tests:
- name: Integration Tests
+ name: Integration Tests (Linux)
needs: [smoke-test]
if: always() && needs.smoke-test.result == 'success'
runs-on: ubuntu-24.04
@@ -145,9 +145,9 @@ jobs:
uv run ./scripts/test-integration.sh
timeout-minutes: 20
- # Linux-only — validates the Linux binary in isolation. Full platform validation runs post-merge.
+ # Linux release validation — validates the Linux binary in isolation.
release-validation:
- name: Release Validation
+ name: Release Validation (Linux)
needs: [integration-tests]
if: always() && needs.integration-tests.result == 'success'
runs-on: ubuntu-24.04
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a7079e883..4cc14d3fa 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -15,7 +15,7 @@ permissions:
contents: read
jobs:
- # Linux-only for fast PR feedback. Full platform matrix runs post-merge in build-release.yml.
+ # Unit tests for fast PR feedback; Windows unit tests run in a parallel job. Full platform matrix (incl. macOS) runs post-merge in build-release.yml.
test:
runs-on: ubuntu-24.04
permissions:
@@ -57,7 +57,7 @@ jobs:
# Linux-only binary build for PR validation. Full platform builds run post-merge.
build:
- name: Build APM Binary
+ name: Build APM Binary (Linux)
needs: [test]
runs-on: ubuntu-24.04
permissions:
diff --git a/README.md b/README.md
index f61b49d87..7e35b1bb0 100644
--- a/README.md
+++ b/README.md
@@ -46,13 +46,25 @@ apm install # every agent is configured
## Get Started
+#### Linux / macOS
+
```bash
curl -sSL https://raw.githubusercontent.com/microsoft/apm/main/install.sh | sh
```
+#### Windows
+
+```powershell
+powershell -ExecutionPolicy Bypass -c "irm https://raw.githubusercontent.com/microsoft/apm/main/install.ps1 | iex"
+```
+
+Native release binaries are published for macOS, Linux, and Windows x86_64. `apm update` reuses the matching platform installer.
+
Other install methods
+#### Linux / macOS
+
```bash
# Homebrew
brew install microsoft/apm/apm
@@ -60,6 +72,16 @@ brew install microsoft/apm/apm
pip install apm-cli
```
+#### Windows
+
+```powershell
+# Scoop
+scoop bucket add apm https://github.com/microsoft/scoop-apm
+scoop install apm
+# pip
+pip install apm-cli
+```
+
Then start adding packages:
diff --git a/build/apm.spec b/build/apm.spec
index d6501f8c5..bfaa75c5f 100644
--- a/build/apm.spec
+++ b/build/apm.spec
@@ -23,10 +23,15 @@ entry_point = repo_root / 'src' / 'apm_cli' / 'cli.py'
# Data files to include - recursively include all template files
datas = [
(str(repo_root / 'scripts' / 'runtime'), 'scripts/runtime'), # Bundle runtime setup scripts
- (str(repo_root / 'scripts' / 'github-token-helper.sh'), 'scripts'), # Bundle GitHub token helper
(str(repo_root / 'pyproject.toml'), '.'), # Bundle pyproject.toml for version reading
]
+# Bundle platform-appropriate token helper
+if sys.platform == 'win32':
+ datas.append((str(repo_root / 'scripts' / 'windows' / 'github-token-helper.ps1'), 'scripts'))
+else:
+ datas.append((str(repo_root / 'scripts' / 'github-token-helper.sh'), 'scripts'))
+
# Recursively add all files from templates directory, including hidden directories
def collect_template_files(templates_root):
"""Recursively collect all template files, including those in hidden directories."""
@@ -205,6 +210,9 @@ a = Analysis(
pyz = PYZ(a.pure, a.zipped_data, cipher=None)
+# GNU strip corrupts Windows PE/COFF binaries; only enable on Unix
+_strip = sys.platform != 'win32'
+
# Switch to --onedir for directory-based deployment (faster startup with --onedir)
exe = EXE(
pyz,
@@ -214,7 +222,7 @@ exe = EXE(
name='apm',
debug=False,
bootloader_ignore_signals=False,
- strip=True, # Strip debug symbols for smaller size
+ strip=_strip, # Strip debug symbols (Unix only; corrupts Windows DLLs)
upx=is_upx_available(), # Enable UPX compression only if available
upx_exclude=[],
runtime_tmpdir=None,
@@ -231,7 +239,7 @@ coll = COLLECT(
a.binaries,
a.zipfiles,
a.datas,
- strip=True,
+ strip=_strip,
upx=is_upx_available(),
upx_exclude=[],
name='apm'
diff --git a/docs/src/content/docs/getting-started/installation.md b/docs/src/content/docs/getting-started/installation.md
index 995e28020..ddd0660e5 100644
--- a/docs/src/content/docs/getting-started/installation.md
+++ b/docs/src/content/docs/getting-started/installation.md
@@ -17,7 +17,25 @@ sidebar:
curl -sSL https://raw.githubusercontent.com/microsoft/apm/main/install.sh | sh
```
-The install script detects your platform, downloads the latest binary, and installs it to `/usr/local/bin/`.
+On Windows PowerShell:
+
+```powershell
+powershell -ExecutionPolicy Bypass -c "irm https://raw.githubusercontent.com/microsoft/apm/main/install.ps1 | iex"
+```
+
+The install scripts automatically:
+- Detects your platform (macOS/Linux/Windows, Intel/ARM)
+- Downloads the latest binary
+- Installs to `/usr/local/bin/` on macOS/Linux
+- Installs under `%LOCALAPPDATA%\Programs\apm\` on Windows and adds a user-level `apm` shim to `PATH`
+- Verifies installation
+
+### Windows Package Manager (Scoop)
+
+```powershell
+scoop bucket add apm https://github.com/microsoft/scoop-apm
+scoop install apm
+```
## pip install
@@ -31,6 +49,21 @@ Requires Python 3.10+.
Download the archive for your platform from [GitHub Releases](https://github.com/microsoft/apm/releases/latest) and install manually:
+#### Windows x86_64
+
+```powershell
+# Download and extract the Windows binary
+Invoke-WebRequest -Uri https://github.com/microsoft/apm/releases/latest/download/apm-windows-x86_64.zip -OutFile apm-windows-x86_64.zip
+Expand-Archive -Path .\apm-windows-x86_64.zip -DestinationPath .
+
+# Copy to a permanent location and add to PATH
+$installDir = "$env:LOCALAPPDATA\Programs\apm"
+New-Item -ItemType Directory -Force -Path $installDir | Out-Null
+Copy-Item -Path .\apm-windows-x86_64\* -Destination $installDir -Recurse -Force
+[Environment]::SetEnvironmentVariable("Path", "$installDir;" + [Environment]::GetEnvironmentVariable("Path", "User"), "User")
+```
+
+#### macOS / Linux
```bash
# Example: macOS Apple Silicon
curl -L https://github.com/microsoft/apm/releases/latest/download/apm-darwin-arm64.tar.gz | tar -xz
@@ -107,6 +140,10 @@ mkdir -p ~/bin
# then install the binary to ~/bin/apm and add ~/bin to PATH
```
+### Verify Installation
+
+After installing, verify the CLI is on your PATH:
+
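+```bash
+apm --version   # prints the installed APM version
+```
+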
### Authentication errors when installing packages
If `apm install` fails with authentication errors for private repositories, ensure you have a valid GitHub token configured:
diff --git a/docs/src/content/docs/reference/cli-commands.md b/docs/src/content/docs/reference/cli-commands.md
index 168b4a900..fbfa3dbb0 100644
--- a/docs/src/content/docs/reference/cli-commands.md
+++ b/docs/src/content/docs/reference/cli-commands.md
@@ -435,7 +435,7 @@ apm update
**Behavior:**
- Fetches latest release from GitHub
- Compares with current installed version
-- Downloads and runs the official install script
+- Downloads and runs the official platform installer (`install.sh` on macOS/Linux, `install.ps1` on Windows)
- Preserves existing configuration and projects
- Shows progress and success/failure status
@@ -451,10 +451,17 @@ This check is non-blocking and cached to avoid slowing down the CLI.
**Manual Update:**
If the automatic update fails, you can always update manually:
+
+#### Linux / macOS
```bash
curl -sSL https://raw.githubusercontent.com/microsoft/apm/main/install.sh | sh
```
+#### Windows
+```powershell
+powershell -ExecutionPolicy Bypass -c "irm https://raw.githubusercontent.com/microsoft/apm/main/install.ps1 | iex"
+```
+
### `apm deps` - Manage APM package dependencies
Manage APM package dependencies with installation status, tree visualization, and package information.
@@ -1032,6 +1039,11 @@ apm runtime setup codex
apm runtime setup llm
```
+**Windows support:**
+- On Windows, APM runs the setup scripts through PowerShell automatically
+- No special flags are required
+- Platform detection is automatic
+
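+For example, the same setup command shown above works unchanged from PowerShell:
+
+```powershell
+apm runtime setup codex
+```
+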
**Default Behavior:**
- Installs runtime binary from official sources
- Configures with GitHub Models (free) as APM default
diff --git a/install.ps1 b/install.ps1
new file mode 100644
index 000000000..a4386bb0f
--- /dev/null
+++ b/install.ps1
@@ -0,0 +1,392 @@
+param(
+ [string]$Repo = "microsoft/apm"
+)
+
+$ErrorActionPreference = "Stop"
+
+$installRoot = Join-Path $env:LOCALAPPDATA "Programs\apm"
+$binDir = Join-Path $installRoot "bin"
+$releasesDir = Join-Path $installRoot "releases"
+$assetName = "apm-windows-x86_64.zip"
+
+# ---------------------------------------------------------------------------
+# Helper functions
+# ---------------------------------------------------------------------------
+
+function Write-Info {
+ param([string]$Message)
+ Write-Host $Message -ForegroundColor Cyan
+}
+
+function Write-Success {
+ param([string]$Message)
+ Write-Host $Message -ForegroundColor Green
+}
+
+function Write-WarningText {
+ param([string]$Message)
+ Write-Host $Message -ForegroundColor Yellow
+}
+
+function Write-ErrorText {
+ param([string]$Message)
+ Write-Host $Message -ForegroundColor Red
+}
+
+function Get-AuthHeader {
+ if ($env:GITHUB_APM_PAT) {
+ return @{ Authorization = "token $($env:GITHUB_APM_PAT)" }
+ }
+
+ if ($env:GITHUB_TOKEN) {
+ return @{ Authorization = "token $($env:GITHUB_TOKEN)" }
+ }
+
+ return @{}
+}
+
+function Invoke-GitHubJson {
+ param(
+ [string]$Url,
+ [hashtable]$Headers
+ )
+
+ if ($Headers.Count -gt 0) {
+ return Invoke-RestMethod -Uri $Url -Headers $Headers
+ }
+
+ return Invoke-RestMethod -Uri $Url
+}
+
+function Add-ToUserPath {
+ param([string]$PathEntry)
+
+ $currentUserPath = [Environment]::GetEnvironmentVariable("Path", "User")
+ $userEntries = @()
+ if ($currentUserPath) {
+ $userEntries = $currentUserPath.Split(";", [System.StringSplitOptions]::RemoveEmptyEntries)
+ }
+
+ if ($userEntries -notcontains $PathEntry) {
+ $newUserPath = if ($currentUserPath) { "$PathEntry;$currentUserPath" } else { $PathEntry }
+ [Environment]::SetEnvironmentVariable("Path", $newUserPath, "User")
+ Write-Info "Added $PathEntry to your user PATH."
+ }
+
+ if (($env:Path -split ";") -notcontains $PathEntry) {
+ $env:Path = "$PathEntry;$env:Path"
+ }
+}
+
+function Test-PythonRequirement {
+ foreach ($cmd in @("python3", "python")) {
+ $exe = Get-Command $cmd -ErrorAction SilentlyContinue
+ if ($exe) {
+ try {
+ $verStr = & $cmd -c "import sys; print('.'.join(map(str, sys.version_info[:2])))" 2>$null
+ if ($verStr) {
+ $parts = $verStr.Split('.')
+ $major = [int]$parts[0]
+ $minor = [int]$parts[1]
+ if ($major -gt 3 -or ($major -eq 3 -and $minor -ge 9)) {
+ return $cmd
+ }
+ }
+ } catch {
+ # Ignore; try next candidate
+ }
+ }
+ }
+ return $null
+}
+
+function Install-ViaPip {
+ $pythonCmd = Test-PythonRequirement
+ if (-not $pythonCmd) {
+ Write-ErrorText "Python 3.9+ is not available — cannot fall back to pip."
+ return $false
+ }
+
+ Write-Info "Attempting installation via pip ($pythonCmd)..."
+
+ $pipCmd = $null
+ foreach ($candidate in @("pip3", "pip")) {
+ if (Get-Command $candidate -ErrorAction SilentlyContinue) {
+ $pipCmd = $candidate
+ break
+ }
+ }
+ if (-not $pipCmd) {
+ $pipCmd = "$pythonCmd -m pip"
+ }
+
+ try {
+ $pipArgs = "install --user apm-cli"
+ if ($pipCmd -like "* -m pip") {
+ $output = & $pythonCmd -m pip install --user apm-cli 2>&1
+ $pipExitCode = $LASTEXITCODE
+ $output | Write-Host
+ } else {
+ $output = & $pipCmd install --user apm-cli 2>&1
+ $pipExitCode = $LASTEXITCODE
+ $output | Write-Host
+ }
+ if ($pipExitCode -ne 0) {
+ Write-ErrorText "pip install failed (exit code $pipExitCode)."
+ return $false
+ }
+ } catch {
+ Write-ErrorText "pip install failed: $_"
+ return $false
+ }
+
+ # Verify apm is available after pip install
+ $apmExe = Get-Command apm -ErrorAction SilentlyContinue
+ if ($apmExe) {
+ $ver = & apm --version 2>$null
+ Write-Success "APM installed successfully via pip! Version: $ver"
+ Write-Info "Location: $($apmExe.Source)"
+ } else {
+ Write-WarningText "APM installed but not found in PATH."
+ Write-Host "You may need to add your Python user scripts directory to PATH."
+ }
+ return $true
+}
+
+function Write-ManualInstallHelp {
+ Write-Host ""
+ Write-Info "Manual installation options:"
+ Write-Host " 1. pip (recommended): pip install --user apm-cli"
+ Write-Host " 2. From source:"
+ Write-Host " git clone https://github.com/$Repo.git"
+ Write-Host " cd apm && uv sync && uv run pip install -e ."
+ Write-Host ""
+ Write-Host "Need help? Create an issue at: https://github.com/$Repo/issues"
+}
+
+# ---------------------------------------------------------------------------
+# Banner
+# ---------------------------------------------------------------------------
+
+Write-Host ""
+Write-Host "===========================================================" -ForegroundColor Blue
+Write-Host " APM Installer " -ForegroundColor Blue
+Write-Host " The NPM for AI-Native Development " -ForegroundColor Blue
+Write-Host "===========================================================" -ForegroundColor Blue
+Write-Host ""
+
+# ---------------------------------------------------------------------------
+# Stage 1 — Fetch release info (unauthenticated first, then authenticated)
+# ---------------------------------------------------------------------------
+
+Write-Info "Fetching latest release information..."
+
+$release = $null
+$headers = @{}
+
+# Try unauthenticated first
+try {
+ $release = Invoke-RestMethod -Uri "https://api.github.com/repos/$Repo/releases/latest"
+} catch {
+ # Swallow — will try authenticated below
+}
+
+if (-not $release -or -not $release.tag_name) {
+ Write-Info "Unauthenticated request failed or returned no data. Retrying with authentication..."
+ $headers = Get-AuthHeader
+ if ($headers.Count -eq 0) {
+ Write-ErrorText "Repository may be private but no authentication token found."
+ Write-Host "Set GITHUB_APM_PAT or GITHUB_TOKEN and retry."
+ Write-ManualInstallHelp
+ exit 1
+ }
+ try {
+ $release = Invoke-GitHubJson -Url "https://api.github.com/repos/$Repo/releases/latest" -Headers $headers
+ } catch {
+ Write-ErrorText "Failed to fetch release information: $_"
+ Write-ManualInstallHelp
+ exit 1
+ }
+}
+
+if (-not $release.tag_name) {
+ Write-ErrorText "Could not determine the latest release tag."
+ Write-ManualInstallHelp
+ exit 1
+}
+
+$asset = $release.assets | Where-Object { $_.name -eq $assetName } | Select-Object -First 1
+if (-not $asset) {
+ Write-ErrorText "Release $($release.tag_name) does not contain $assetName."
+ Write-ManualInstallHelp
+ exit 1
+}
+
+$tagName = $release.tag_name
+Write-Success "Latest version: $tagName"
+
+$releaseDir = Join-Path $releasesDir $tagName
+$tempDir = Join-Path ([System.IO.Path]::GetTempPath()) ("apm-install-" + [System.Guid]::NewGuid().ToString("N"))
+$zipPath = Join-Path $tempDir $assetName
+
+New-Item -ItemType Directory -Force -Path $tempDir | Out-Null
+New-Item -ItemType Directory -Force -Path $binDir | Out-Null
+New-Item -ItemType Directory -Force -Path $releasesDir | Out-Null
+
+try {
+ # ------------------------------------------------------------------
+ # Stage 2 — Download binary (3-stage fallback chain)
+ # ------------------------------------------------------------------
+
+ Write-Info "Downloading $assetName from $tagName..."
+
+ $downloadOk = $false
+
+ # 2a. Direct browser_download_url without auth
+ try {
+ Invoke-WebRequest -Uri $asset.browser_download_url -OutFile $zipPath -UseBasicParsing
+ $downloadOk = $true
+ Write-Success "Download successful"
+ } catch {
+ Write-WarningText "Unauthenticated download failed, retrying with authentication..."
+ }
+
+ # 2b. API asset URL with Accept: application/octet-stream (authenticated)
+ if (-not $downloadOk) {
+ if ($headers.Count -eq 0) { $headers = Get-AuthHeader }
+ if ($headers.Count -gt 0 -and $asset.url) {
+ try {
+ $apiHeaders = $headers.Clone()
+ $apiHeaders["Accept"] = "application/octet-stream"
+ Invoke-WebRequest -Uri $asset.url -Headers $apiHeaders -OutFile $zipPath -UseBasicParsing
+ $downloadOk = $true
+ Write-Success "Download successful via GitHub API"
+ } catch {
+ Write-WarningText "API download failed, trying direct URL with auth..."
+ }
+ }
+ }
+
+ # 2c. Direct browser_download_url with auth header
+ if (-not $downloadOk) {
+ if ($headers.Count -eq 0) { $headers = Get-AuthHeader }
+ if ($headers.Count -gt 0) {
+ try {
+ Invoke-WebRequest -Uri $asset.browser_download_url -Headers $headers -OutFile $zipPath -UseBasicParsing
+ $downloadOk = $true
+ Write-Success "Download successful with authentication"
+ } catch {
+ # Will fall through to pip fallback
+ }
+ }
+ }
+
+ if (-not $downloadOk) {
+ Write-ErrorText "All download attempts failed."
+ Write-Host "This might mean:"
+ Write-Host " - Network connectivity issues"
+ Write-Host " - Invalid GitHub token or insufficient permissions"
+ Write-Host " - Private repository requires authentication"
+ Write-Host ""
+
+ Write-Info "Attempting automatic fallback to pip..."
+ if (Install-ViaPip) { exit 0 }
+ Write-ManualInstallHelp
+ exit 1
+ }
+
+ # ------------------------------------------------------------------
+ # Verify checksum (if .sha256 asset is available)
+ # ------------------------------------------------------------------
+
+ $sha256AssetName = "$assetName.sha256"
+ $sha256Asset = $release.assets | Where-Object { $_.name -eq $sha256AssetName } | Select-Object -First 1
+ if ($sha256Asset) {
+ Write-Info "Verifying download checksum..."
+ $sha256Path = Join-Path $tempDir $sha256AssetName
+ try {
+ Invoke-WebRequest -Uri $sha256Asset.browser_download_url -OutFile $sha256Path -UseBasicParsing
+ $expectedHash = (Get-Content $sha256Path -Raw).Trim().Split(" ")[0]
+ $actualHash = (Get-FileHash -Path $zipPath -Algorithm SHA256).Hash.ToLower()
+ if ($actualHash -ne $expectedHash) {
+ Write-ErrorText "Checksum verification FAILED."
+ Write-Host " Expected: $expectedHash"
+ Write-Host " Actual: $actualHash"
+ Write-Info "Attempting automatic fallback to pip..."
+ if (Install-ViaPip) { exit 0 }
+ Write-ManualInstallHelp
+ exit 1
+ }
+ Write-Success "Checksum verified"
+ } catch {
+ Write-WarningText "Could not verify checksum (non-fatal): $_"
+ }
+ }
+
+ # ------------------------------------------------------------------
+ # Extract
+ # ------------------------------------------------------------------
+
+ Write-Info "Extracting package..."
+ Expand-Archive -Path $zipPath -DestinationPath $tempDir -Force
+
+ $packageDir = Join-Path $tempDir "apm-windows-x86_64"
+ $exePath = Join-Path $packageDir "apm.exe"
+ if (-not (Test-Path $exePath)) {
+ Write-ErrorText "Extracted package is missing apm.exe."
+ Write-Info "Attempting automatic fallback to pip..."
+ if (Install-ViaPip) { exit 0 }
+ Write-ManualInstallHelp
+ exit 1
+ }
+
+ # ------------------------------------------------------------------
+ # Stage 3 — Binary test before installation
+ # ------------------------------------------------------------------
+
+ Write-Info "Testing binary..."
+ try {
+ $testOutput = & $exePath --version 2>&1
+ if ($LASTEXITCODE -ne 0) { throw "exit code $LASTEXITCODE" }
+ Write-Success "Binary test successful: $testOutput"
+ } catch {
+ Write-ErrorText "Downloaded binary failed to run: $_"
+ Write-Host ""
+ Write-Info "Attempting automatic fallback to pip..."
+ if (Install-ViaPip) { exit 0 }
+ Write-ManualInstallHelp
+ exit 1
+ }
+
+ # ------------------------------------------------------------------
+ # Install
+ # ------------------------------------------------------------------
+
+ if (Test-Path $releaseDir) {
+ Remove-Item -Recurse -Force $releaseDir
+ }
+
+ Move-Item -Path $packageDir -Destination $releaseDir
+
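+ # Stable launcher: bin\apm.cmd forwards to the versioned release directory,
+ # so PATH never has to change across upgrades.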
+ $shimPath = Join-Path $binDir "apm.cmd"
+ $shimContent = "@echo off`r`n`"$releaseDir\apm.exe`" %*`r`n"
+ Set-Content -Path $shimPath -Value $shimContent -Encoding ASCII
+
+ Add-ToUserPath -PathEntry $binDir
+
+ Write-Host ""
+ Write-Success "APM $tagName installed successfully!"
+ Write-Info "Command shim: $shimPath"
+ Write-Host ""
+ Write-Info "Quick start:"
+ Write-Host " apm init my-app # Create a new APM project"
+ Write-Host " cd my-app && apm install # Install dependencies"
+ Write-Host " apm run # Run your first prompt"
+ Write-Host ""
+ Write-Host "Documentation: https://github.com/$Repo"
+ Write-Info "Run 'apm --version' in a new terminal to verify the installation."
+} finally {
+ if (Test-Path $tempDir) {
+ Remove-Item -Recurse -Force $tempDir
+ }
+}
\ No newline at end of file
diff --git a/scripts/runtime/setup-codex.ps1 b/scripts/runtime/setup-codex.ps1
new file mode 100644
index 000000000..6557a20a6
--- /dev/null
+++ b/scripts/runtime/setup-codex.ps1
@@ -0,0 +1,197 @@
+# Setup script for Codex runtime (Windows)
+# Downloads Codex binary from GitHub releases and configures with GitHub Models
+
+param(
+ [switch]$Vanilla,
+ [string]$Version = "latest"
+)
+
+$ErrorActionPreference = "Stop"
+
+# Source common utilities
+. "$PSScriptRoot\setup-common.ps1"
+
+# Source token helper (look in same dir first, then parent)
+$tokenHelperPath = Join-Path $PSScriptRoot "github-token-helper.ps1"
+if (-not (Test-Path $tokenHelperPath)) {
+ $tokenHelperPath = Join-Path (Split-Path $PSScriptRoot) "github-token-helper.ps1"
+}
+if (Test-Path $tokenHelperPath) {
+ . $tokenHelperPath
+}
+
+# Configuration
+$CodexRepo = "openai/codex"
+
+function Install-Codex {
+ Write-Info "Setting up Codex runtime..."
+
+ # Detect platform
+ Get-Platform
+
+ # Map APM platform to Codex binary format
+ switch ($script:DETECTED_PLATFORM) {
+ "windows-x86_64" { $codexPlatform = "x86_64-pc-windows-msvc" }
+ "windows-arm64" { $codexPlatform = "aarch64-pc-windows-msvc" }
+ default {
+ Write-ErrorText "Unsupported platform: $script:DETECTED_PLATFORM"
+ exit 1
+ }
+ }
+
+ Initialize-ApmRuntimeDir
+
+ $runtimeDir = Join-Path $env:USERPROFILE ".apm\runtimes"  # two-arg Join-Path also works on Windows PowerShell 5.1
+ $codexBinary = Join-Path $runtimeDir "codex.exe"
+ $codexConfigDir = Join-Path $env:USERPROFILE ".codex"
+ $codexConfig = Join-Path $codexConfigDir "config.toml"
+ $tempDir = Join-Path $env:TEMP "apm-codex-install"
+
+ if (-not (Test-Path $tempDir)) {
+ New-Item -ItemType Directory -Force -Path $tempDir | Out-Null
+ }
+
+ # Determine download URL
+ $authHeaders = @{}
+ if ($env:GITHUB_TOKEN) {
+ $authHeaders["Authorization"] = "Bearer $($env:GITHUB_TOKEN)"
+ Write-Info "Using authenticated GitHub API request (GITHUB_TOKEN)"
+ } elseif ($env:GITHUB_APM_PAT) {
+ $authHeaders["Authorization"] = "Bearer $($env:GITHUB_APM_PAT)"
+ Write-Info "Using authenticated GitHub API request (GITHUB_APM_PAT)"
+ } else {
+ Write-Info "Using unauthenticated GitHub API request (60 requests/hour limit)"
+ }
+
+ if ($Version -eq "latest") {
+ Write-Info "Fetching latest Codex release information..."
+ $releaseUrl = "https://api.github.com/repos/$CodexRepo/releases/latest"
+ $params = @{ Uri = $releaseUrl }
+ if ($authHeaders.Count -gt 0) { $params["Headers"] = $authHeaders }
+
+ try {
+ $release = Invoke-RestMethod @params
+ $latestTag = $release.tag_name
+ } catch {
+ Write-ErrorText "Failed to fetch latest release tag from GitHub API"
+ exit 1
+ }
+
+ if (-not $latestTag) {
+ Write-ErrorText "Failed to determine latest release tag"
+ exit 1
+ }
+
+ Write-Info "Using Codex release: $latestTag"
+ $downloadUrl = "https://github.com/$CodexRepo/releases/download/$latestTag/codex-$codexPlatform.tar.gz"
+ } else {
+ $downloadUrl = "https://github.com/$CodexRepo/releases/download/$Version/codex-$codexPlatform.tar.gz"
+ }
+
+ # Download archive
+ $tarFile = Join-Path $tempDir "codex-$codexPlatform.tar.gz"
+ $dlHeaders = @{}
+ if ($authHeaders.Count -gt 0) { $dlHeaders = $authHeaders }
+ Save-File -Url $downloadUrl -Output $tarFile -Description "Codex binary archive" -Headers $dlHeaders
+
+ # Extract (tar is available on Windows 10+)
+ Write-Info "Extracting Codex binary..."
+ Push-Location $tempDir
+ tar -xzf $tarFile
+ Pop-Location
+
+ # Find extracted binary
+ $extractedBinary = $null
+ $candidates = @(
+ (Join-Path $tempDir "codex.exe"),
+ (Join-Path $tempDir "codex"),
+ (Join-Path $tempDir "codex-$codexPlatform.exe"),
+ (Join-Path $tempDir "codex-$codexPlatform")
+ )
+ foreach ($candidate in $candidates) {
+ if (Test-Path $candidate) {
+ $extractedBinary = $candidate
+ break
+ }
+ }
+
+ if (-not $extractedBinary) {
+ Write-ErrorText "Codex binary not found in extracted archive. Contents:"
+ Get-ChildItem $tempDir | Format-Table Name
+ exit 1
+ }
+
+ # Move to final location
+ Move-Item -Force $extractedBinary $codexBinary
+
+ # Clean up
+ Remove-Item -Recurse -Force $tempDir -ErrorAction SilentlyContinue
+
+ Test-Binary $codexBinary "Codex"
+
+ # Create configuration if not vanilla
+ if (-not $Vanilla) {
+ # Use centralized token management
+ if (Get-Command Initialize-GitHubToken -ErrorAction SilentlyContinue) {
+ Initialize-GitHubToken
+ }
+
+ if (-not (Test-Path $codexConfigDir)) {
+ Write-Info "Creating Codex config directory: $codexConfigDir"
+ New-Item -ItemType Directory -Force -Path $codexConfigDir | Out-Null
+ }
+
+ Write-Info "Creating Codex configuration for GitHub Models (APM default)..."
+
+ $githubTokenVar = "GITHUB_TOKEN"
+ if ($env:GITHUB_TOKEN) {
+ Write-Info "Using GITHUB_TOKEN for GitHub Models authentication"
+ } elseif ($env:GITHUB_APM_PAT) {
+ $githubTokenVar = "GITHUB_APM_PAT"
+ Write-WarningText "Using GITHUB_APM_PAT for GitHub Models (may not work if org-scoped)"
+ } else {
+ Write-Info "No GitHub token found - you'll need to set GITHUB_TOKEN"
+ }
+
+ @"
+model_provider = "github-models"
+model = "openai/gpt-4o"
+
+[model_providers.github-models]
+name = "GitHub Models"
+base_url = "https://models.github.ai/inference/"
+env_key = "$githubTokenVar"
+wire_api = "chat"
+"@ | Set-Content -Path $codexConfig -Encoding UTF8
+
+ Write-Success "Codex configuration created at $codexConfig"
+ } else {
+ Write-Info "Vanilla mode: Skipping APM configuration"
+ }
+
+ Update-UserPath
+
+ # Test installation
+ Write-Info "Testing Codex installation..."
+ try {
+ $ver = & $codexBinary --version 2>&1
+ Write-Success "Codex runtime installed successfully! Version: $ver"
+ } catch {
+ Write-WarningText "Codex binary installed but version check failed. It may still work."
+ }
+
+ Write-Host ""
+ Write-Info "Next steps:"
+ if (-not $Vanilla) {
+ Write-Host "1. Set up your APM project: apm init my-project"
+ Write-Host "2. Install MCP servers: apm install"
+ Write-Host "3. Set your token: `$env:GITHUB_TOKEN = 'your_token_here'"
+ Write-Host "4. Run: apm run start --param name=YourName"
+ Write-Success "Codex installed and configured with GitHub Models!"
+ } else {
+ Write-Host "1. Configure Codex with your preferred provider"
+ Write-Host "2. Then run with APM: apm run start"
+ }
+}
+
+Install-Codex
diff --git a/scripts/runtime/setup-common.ps1 b/scripts/runtime/setup-common.ps1
new file mode 100644
index 000000000..dbb0eac3b
--- /dev/null
+++ b/scripts/runtime/setup-common.ps1
@@ -0,0 +1,91 @@
+# Common utilities for runtime setup scripts (Windows PowerShell)
+
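+# Dot-source this file from a sibling setup script, e.g.:
+#   . "$PSScriptRoot\setup-common.ps1"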
+$ErrorActionPreference = "Stop"
+
+# Logging functions
+function Write-Info { param([string]$Message) Write-Host "[INFO] $Message" -ForegroundColor Blue }
+function Write-Success { param([string]$Message) Write-Host "[OK] $Message" -ForegroundColor Green }
+function Write-WarningText { param([string]$Message) Write-Host "[WARN] $Message" -ForegroundColor Yellow }
+function Write-ErrorText { param([string]$Message) Write-Host "[ERROR] $Message" -ForegroundColor Red }
+
+# Platform detection
+function Get-Platform {
+ $arch = [System.Runtime.InteropServices.RuntimeInformation]::ProcessArchitecture
+ switch ($arch) {
+ "X64" { $script:DETECTED_PLATFORM = "windows-x86_64" }
+ "Arm64" { $script:DETECTED_PLATFORM = "windows-arm64" }
+ default {
+ Write-ErrorText "Unsupported architecture: $arch"
+ exit 1
+ }
+ }
+ Write-Info "Detected platform: $script:DETECTED_PLATFORM"
+}
+
+# Create APM runtime directory
+function Initialize-ApmRuntimeDir {
+ $runtimeDir = Join-Path $env:USERPROFILE ".apm\runtimes"
+ if (-not (Test-Path $runtimeDir)) {
+ Write-Info "Creating APM runtime directory: $runtimeDir"
+ New-Item -ItemType Directory -Force -Path $runtimeDir | Out-Null
+ }
+}
+
+# Add APM runtimes to user PATH if not already present
+function Update-UserPath {
+ $runtimeDir = Join-Path $env:USERPROFILE ".apm\runtimes"
+
+ # Update current session PATH
+ if ($env:PATH -notlike "*$runtimeDir*") {
+ $env:PATH = "$runtimeDir;$env:PATH"
+ Write-Info "Added $runtimeDir to current session PATH"
+ }
+
+ # Persist to user PATH
+ $userPath = [Environment]::GetEnvironmentVariable("Path", "User")
+ if ($userPath -notlike "*$runtimeDir*") {
+ $newPath = "$runtimeDir;$userPath"
+ [Environment]::SetEnvironmentVariable("Path", $newPath, "User")
+ Write-Info "Added $runtimeDir to persistent user PATH"
+ } else {
+ Write-Info "PATH already configured for $runtimeDir"
+ }
+
+ Write-Success "Runtime binaries are now available!"
+}
+
+# Download file using Invoke-WebRequest
+function Save-File {
+ param(
+ [string]$Url,
+ [string]$Output,
+ [string]$Description = "file",
+ [hashtable]$Headers = @{}
+ )
+
+ Write-Info "Downloading $Description from $Url"
+ $params = @{
+ Uri = $Url
+ OutFile = $Output
+ UseBasicParsing = $true
+ }
+ if ($Headers.Count -gt 0) {
+ $params["Headers"] = $Headers
+ }
+ Invoke-WebRequest @params
+}
+
+# Verify binary exists
+function Test-Binary {
+ param(
+ [string]$BinaryPath,
+ [string]$BinaryName
+ )
+
+ if (-not (Test-Path $BinaryPath)) {
+ Write-ErrorText "$BinaryName binary not found at $BinaryPath"
+ exit 1
+ }
+
+ Write-Success "$BinaryName binary installed and verified"
+}
diff --git a/scripts/runtime/setup-copilot.ps1 b/scripts/runtime/setup-copilot.ps1
new file mode 100644
index 000000000..508041350
--- /dev/null
+++ b/scripts/runtime/setup-copilot.ps1
@@ -0,0 +1,170 @@
+# Setup script for GitHub Copilot CLI runtime (Windows)
+# Installs @github/copilot with MCP configuration support
+
+param(
+ [switch]$Vanilla
+)
+
+$ErrorActionPreference = "Stop"
+
+# Source common utilities
+. "$PSScriptRoot\setup-common.ps1"
+
+# Configuration
+$CopilotPackage = "@github/copilot"
+$NodeMinVersion = 22
+$NpmMinVersion = 10
+
+function Test-NodeVersion {
+ Write-Info "Checking Node.js version..."
+
+ $node = Get-Command node -ErrorAction SilentlyContinue
+ if (-not $node) {
+ Write-ErrorText "Node.js is not installed"
+ Write-Info "Please install Node.js version $NodeMinVersion or higher from https://nodejs.org/"
+ exit 1
+ }
+
+ $nodeVersion = (node --version) -replace '^v', ''
+ $nodeMajor = [int]($nodeVersion.Split('.')[0])
+
+ if ($nodeMajor -lt $NodeMinVersion) {
+ Write-ErrorText "Node.js version $nodeVersion is too old. Required: v$NodeMinVersion or higher"
+ Write-Info "Please update Node.js from https://nodejs.org/"
+ exit 1
+ }
+
+ Write-Success "Node.js version $nodeVersion"
+}
+
+function Test-NpmVersion {
+ Write-Info "Checking npm version..."
+
+ $npm = Get-Command npm -ErrorAction SilentlyContinue
+ if (-not $npm) {
+ Write-ErrorText "npm is not installed"
+ Write-Info "Please install npm version $NpmMinVersion or higher"
+ exit 1
+ }
+
+ $npmVersion = npm --version
+ $npmMajor = [int]($npmVersion.Split('.')[0])
+
+ if ($npmMajor -lt $NpmMinVersion) {
+ Write-ErrorText "npm version $npmVersion is too old. Required: v$NpmMinVersion or higher"
+ Write-Info "Please update npm with: npm install -g npm@latest"
+ exit 1
+ }
+
+ Write-Success "npm version $npmVersion"
+}
+
+function Install-CopilotCli {
+ Write-Info "Installing GitHub Copilot CLI..."
+
+ try {
+ npm install -g $CopilotPackage
+ Write-Success "Successfully installed $CopilotPackage"
+ } catch {
+ Write-ErrorText "Failed to install $CopilotPackage"
+ Write-Info "This might be due to:"
+ Write-Info " - Insufficient permissions (try running as Administrator)"
+ Write-Info " - Network connectivity issues"
+ Write-Info " - Node.js/npm version compatibility"
+ exit 1
+ }
+}
+
+function Initialize-CopilotDirectory {
+ Write-Info "Setting up Copilot CLI directory structure..."
+
+ $copilotConfigDir = Join-Path $env:USERPROFILE ".copilot"
+ $mcpConfigFile = Join-Path $copilotConfigDir "mcp-config.json"
+
+ if (-not (Test-Path $copilotConfigDir)) {
+ Write-Info "Creating Copilot config directory: $copilotConfigDir"
+ New-Item -ItemType Directory -Force -Path $copilotConfigDir | Out-Null
+ }
+
+ if (-not (Test-Path $mcpConfigFile)) {
+ Write-Info "Creating empty MCP configuration template..."
+ @'
+{
+ "mcpServers": {}
+}
+'@ | Set-Content -Path $mcpConfigFile -Encoding UTF8
+ Write-Info "Empty MCP configuration created at $mcpConfigFile"
+ Write-Info "Use 'apm install' to configure MCP servers"
+ } else {
+ Write-Info "MCP configuration already exists at $mcpConfigFile"
+ }
+}
+
+function Initialize-GithubMcpEnvironment {
+ Write-Info "Setting up GitHub MCP Server environment for Copilot CLI..."
+
+ $copilotToken = ""
+ if ($env:GITHUB_COPILOT_PAT) {
+ $copilotToken = $env:GITHUB_COPILOT_PAT
+ } elseif ($env:GITHUB_TOKEN) {
+ $copilotToken = $env:GITHUB_TOKEN
+ } elseif ($env:GITHUB_APM_PAT) {
+ $copilotToken = $env:GITHUB_APM_PAT
+ }
+
+ if ($copilotToken) {
+ $env:GITHUB_PERSONAL_ACCESS_TOKEN = $copilotToken
+ Write-Success "GitHub MCP Server environment configured"
+ Write-Info "Copilot CLI will automatically set up GitHub MCP Server on first run"
+ } else {
+ Write-WarningText "No GitHub token found for automatic MCP server setup"
+ Write-Info "Set GITHUB_COPILOT_PAT, GITHUB_APM_PAT, or GITHUB_TOKEN to enable automatic GitHub MCP Server"
+ }
+}
+
+function Test-CopilotInstallation {
+ Write-Info "Testing Copilot CLI installation..."
+
+ $copilot = Get-Command copilot -ErrorAction SilentlyContinue
+ if ($copilot) {
+ try {
+ $version = copilot --version
+ Write-Success "Copilot CLI installed successfully! Version: $version"
+ } catch {
+ Write-WarningText "Copilot CLI binary found but version check failed"
+ }
+ } else {
+ Write-ErrorText "Copilot CLI not found in PATH after installation"
+ Write-Info "You may need to restart your terminal or check your npm global installation path"
+ exit 1
+ }
+}
+
+# Main setup
+Write-Info "Setting up GitHub Copilot CLI runtime..."
+
+Test-NodeVersion
+Test-NpmVersion
+Install-CopilotCli
+
+if (-not $Vanilla) {
+ Initialize-CopilotDirectory
+ Initialize-GithubMcpEnvironment
+} else {
+ Write-Info "Vanilla mode: Skipping APM directory setup"
+ Write-Info "You can configure MCP servers manually in ~/.copilot/mcp-config.json"
+}
+
+Test-CopilotInstallation
+
+Write-Host ""
+Write-Info "Next steps:"
+if (-not $Vanilla) {
+ Write-Host "1. Set up your APM project with MCP dependencies:"
+ Write-Host " - Initialize project: apm init my-project"
+ Write-Host " - Install MCP servers: apm install"
+ Write-Host "2. Run: apm run start --param name=YourName"
+} else {
+ Write-Host "1. Configure Copilot CLI manually"
+ Write-Host "2. Then run with APM: apm run start"
+}
diff --git a/scripts/runtime/setup-llm.ps1 b/scripts/runtime/setup-llm.ps1
new file mode 100644
index 000000000..6685596bf
--- /dev/null
+++ b/scripts/runtime/setup-llm.ps1
@@ -0,0 +1,82 @@
+# Setup script for LLM runtime (Windows)
+# Installs Simon Willison's llm library via pip in a managed environment
+
+param(
+ [switch]$Vanilla
+)
+
+$ErrorActionPreference = "Stop"
+
+# Source common utilities
+. "$PSScriptRoot\setup-common.ps1"
+
+function Install-Llm {
+ Write-Info "Setting up LLM runtime..."
+
+ Initialize-ApmRuntimeDir
+
+ $runtimeDir = Join-Path $env:USERPROFILE ".apm\runtimes"
+ $llmVenv = Join-Path $runtimeDir "llm-venv"
+ $llmWrapper = Join-Path $runtimeDir "llm.cmd"
+
+ # Check Python availability (on Windows it's 'python' not 'python3')
+ $python = Get-Command python -ErrorAction SilentlyContinue
+ if (-not $python) {
+ Write-ErrorText "Python is required but not found. Please install Python 3."
+ exit 1
+ }
+
+ # Create virtual environment
+ Write-Info "Creating Python virtual environment for LLM..."
+ python -m venv $llmVenv
+
+ $pipExe = Join-Path $llmVenv "Scripts\pip.exe"
+ $llmExe = Join-Path $llmVenv "Scripts\llm.exe"
+
+ # Install LLM
+ Write-Info "Installing LLM library..."
+ & $pipExe install --upgrade pip
+ & $pipExe install llm
+
+ # Install GitHub Models plugin in non-vanilla mode
+ if (-not $Vanilla) {
+ Write-Info "Installing GitHub Models plugin for APM defaults..."
+ & $pipExe install llm-github-models
+ Write-Success "GitHub Models plugin installed"
+ } else {
+ Write-Info "Vanilla mode: Skipping GitHub Models plugin installation"
+ }
+
+ # Create .cmd wrapper
+ Write-Info "Creating LLM wrapper script..."
+ @"
+@echo off
+"%USERPROFILE%\.apm\runtimes\llm-venv\Scripts\llm.exe" %*
+"@ | Set-Content -Path $llmWrapper -Encoding ASCII
+
+ Test-Binary $llmWrapper "LLM"
+
+ Update-UserPath
+
+ # Test installation
+ Write-Info "Testing LLM installation..."
+ try {
+ $ver = & $llmExe --version 2>&1
+ Write-Success "LLM runtime installed successfully! Version: $ver"
+ } catch {
+ Write-WarningText "LLM installed but version check failed. It may still work."
+ }
+
+ Write-Host ""
+ Write-Info "Next steps:"
+ if (-not $Vanilla) {
+ Write-Host "1. Set your GitHub token: `$env:GITHUB_TOKEN = 'your_token_here'"
+ Write-Host "2. Run with APM: apm run start --runtime=llm"
+ Write-Info "GitHub Models provides free access to OpenAI models with your GitHub token"
+ } else {
+ Write-Host "1. Configure LLM providers: llm keys set "
+ Write-Host "2. Run with APM: apm run start --runtime=llm"
+ }
+}
+
+Install-Llm
diff --git a/scripts/windows/build-binary.ps1 b/scripts/windows/build-binary.ps1
new file mode 100644
index 000000000..3a4c1d59e
--- /dev/null
+++ b/scripts/windows/build-binary.ps1
@@ -0,0 +1,100 @@
+# Build APM binary for Windows using PyInstaller
+# PowerShell equivalent of build-binary.sh
+
+$ErrorActionPreference = "Stop"
+
+# Platform detection
+$arch = [System.Runtime.InteropServices.RuntimeInformation]::ProcessArchitecture
+switch ($arch) {
+ "X64" { $Arch = "x86_64" }
+ "Arm64" { $Arch = "x86_64" } # x86_64 emulation on ARM64
+ default {
+ Write-Host "Unsupported architecture: $arch" -ForegroundColor Red
+ exit 1
+ }
+}
+
+$BinaryName = "apm-windows-$Arch"
+
+Write-Host "Building APM binary for windows-$Arch" -ForegroundColor Blue
+Write-Host "Output binary: $BinaryName" -ForegroundColor Blue
+
+# Clean previous builds
+Write-Host "Cleaning previous builds..." -ForegroundColor Yellow
+if (Test-Path "build/build") { Remove-Item -Recurse -Force "build/build" }
+if (Test-Path "dist") { Remove-Item -Recurse -Force "dist" }
+
+# Check if PyInstaller is available
+try {
+ uv run pyinstaller --version | Out-Null
+} catch {
+ Write-Host "PyInstaller not found. Make sure dependencies are installed with: uv sync --extra build" -ForegroundColor Red
+ exit 1
+}
+
+# Check if UPX is available (optional)
+if (Get-Command upx -ErrorAction SilentlyContinue) {
+ Write-Host "UPX found - binary will be compressed" -ForegroundColor Green
+} else {
+ Write-Host "UPX not found - binary will not be compressed" -ForegroundColor Yellow
+}
+
+# Inject build SHA into version.py
+$VersionFile = "src/apm_cli/version.py"
+$originalContent = Get-Content $VersionFile -Raw
+$BuildSHA = git rev-parse --short HEAD 2>$null
+if ($BuildSHA) {
+ Write-Host "Injecting build SHA: $BuildSHA" -ForegroundColor Yellow
+ $newContent = $originalContent -replace '(?m)^__BUILD_SHA__ = None$', "__BUILD_SHA__ = `"$BuildSHA`""  # (?m) so ^/$ match per line in the -Raw content
+ Set-Content -Path $VersionFile -Value $newContent -NoNewline
+}
+
+try {
+ # Build binary
+ Write-Host "Building binary with PyInstaller..." -ForegroundColor Yellow
+ uv run pyinstaller build/apm.spec --noconfirm
+ if ($LASTEXITCODE -ne 0) { throw "PyInstaller failed with exit code $LASTEXITCODE" }
+
+ # Check if build was successful (onedir mode creates dist/apm/apm.exe)
+ if (-not (Test-Path "dist/apm/apm.exe")) {
+ Write-Host "Build failed - binary not found" -ForegroundColor Red
+ exit 1
+ }
+
+ # Rename the directory to have the platform-specific name
+ Rename-Item "dist/apm" $BinaryName
+
+ # Test the binary (temporarily relax error preference so stderr from native
+ # commands does not throw under $ErrorActionPreference = "Stop")
+ Write-Host "Testing binary..." -ForegroundColor Yellow
+ $savedPref = $ErrorActionPreference
+ $ErrorActionPreference = "Continue"
+ & "dist/$BinaryName/apm.exe" --version
+ $testExit = $LASTEXITCODE
+ $ErrorActionPreference = $savedPref
+ if ($testExit -eq 0) {
+ Write-Host "Binary test successful" -ForegroundColor Green
+ } else {
+ Write-Host "Binary test failed with exit code $testExit" -ForegroundColor Red
+ exit 1
+ }
+
+ # Show binary info
+ Write-Host "Build complete!" -ForegroundColor Green
+ $size = (Get-ChildItem "dist/$BinaryName" -Recurse -File | Measure-Object -Property Length -Sum).Sum
+ $sizeMB = [math]::Round($size / 1MB, 1)
+ Write-Host "Binary: dist/$BinaryName/apm.exe" -ForegroundColor Blue
+ Write-Host "Size: ${sizeMB}MB" -ForegroundColor Blue
+
+ # Create checksum
+ $hash = (Get-FileHash "dist/$BinaryName/apm.exe" -Algorithm SHA256).Hash.ToLower()
+ "$hash dist/$BinaryName/apm.exe" | Set-Content "dist/$BinaryName.sha256"
+ Write-Host "Checksum: dist/$BinaryName.sha256" -ForegroundColor Blue
+
+ Write-Host "Ready for release!" -ForegroundColor Green
+} finally {
+ # Restore version.py
+ if ($BuildSHA) {
+ Set-Content -Path $VersionFile -Value $originalContent -NoNewline
+ }
+}
diff --git a/scripts/windows/github-token-helper.ps1 b/scripts/windows/github-token-helper.ps1
new file mode 100644
index 000000000..f581f3403
--- /dev/null
+++ b/scripts/windows/github-token-helper.ps1
@@ -0,0 +1,116 @@
+#
+# GitHub Token Helper - Standalone PowerShell implementation
+#
+# TOKEN PRECEDENCE RULES (AUTHORITATIVE):
+# ======================================
+# 1. GitHub Models: GITHUB_TOKEN > GITHUB_APM_PAT
+# 2. APM Modules: GITHUB_APM_PAT > GITHUB_TOKEN
+#
+# CRITICAL: Never overwrite existing GITHUB_TOKEN (Models access)
+#
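+# Example (dot-source, then call):
+#   . .\github-token-helper.ps1
+#   Initialize-GitHubToken
+#   $token = Get-TokenForRuntime -Runtime "codex"
+#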
+
+# Setup GitHub tokens with proper precedence and preservation
+function Initialize-GitHubToken {
+ param(
+ [switch]$Quiet
+ )
+
+ if (-not $Quiet) {
+ Write-Host "Setting up GitHub tokens..." -ForegroundColor Blue
+ }
+
+ # CRITICAL: Preserve existing GITHUB_TOKEN if set (for Models access)
+ $preserveGithubToken = $null
+ if ($env:GITHUB_TOKEN) {
+ $preserveGithubToken = $env:GITHUB_TOKEN
+ if (-not $Quiet) {
+ Write-Host "$([char]0x2713) Preserving existing GITHUB_TOKEN for Models access ($($env:GITHUB_TOKEN.Length) chars)" -ForegroundColor Green
+ }
+ } elseif (-not $Quiet) {
+ Write-Host "Warning: No GITHUB_TOKEN found initially" -ForegroundColor Yellow
+ }
+
+ # 2. Setup APM module access
+ # Precedence: GITHUB_APM_PAT > GITHUB_TOKEN
+ if (-not $env:GITHUB_APM_PAT) {
+ if ($env:GITHUB_TOKEN) {
+ $env:GITHUB_APM_PAT = $env:GITHUB_TOKEN
+ }
+ }
+
+ # 3. Setup Models access (GITHUB_TOKEN for Codex, GITHUB_MODELS_KEY for LLM)
+ # Precedence: GITHUB_TOKEN > GITHUB_APM_PAT
+ # CRITICAL: Only set GITHUB_TOKEN if not already present (never overwrite)
+ if (-not $env:GITHUB_TOKEN) {
+ if ($env:GITHUB_APM_PAT) {
+ $env:GITHUB_TOKEN = $env:GITHUB_APM_PAT
+ }
+ }
+
+ # 4. Restore preserved GITHUB_TOKEN (never overwrite Models-enabled token)
+ if ($preserveGithubToken) {
+ $env:GITHUB_TOKEN = $preserveGithubToken
+ }
+
+ # 5. Setup LLM Models key
+ if ($env:GITHUB_TOKEN -and (-not $env:GITHUB_MODELS_KEY)) {
+ $env:GITHUB_MODELS_KEY = $env:GITHUB_TOKEN
+ }
+
+ if (-not $Quiet) {
+ Write-Host "GitHub token environment configured" -ForegroundColor Green
+ }
+}
+
+# Get appropriate token for specific runtime
+function Get-TokenForRuntime {
+ param(
+ [Parameter(Mandatory)]
+ [string]$Runtime
+ )
+
+ switch ($Runtime) {
+ { $_ -in "codex", "models", "llm" } {
+ # Models: GITHUB_TOKEN > GITHUB_APM_PAT
+ if ($env:GITHUB_TOKEN) { return $env:GITHUB_TOKEN }
+ elseif ($env:GITHUB_APM_PAT) { return $env:GITHUB_APM_PAT }
+ }
+ default {
+ # General: GITHUB_APM_PAT > GITHUB_TOKEN
+ if ($env:GITHUB_APM_PAT) { return $env:GITHUB_APM_PAT }
+ elseif ($env:GITHUB_TOKEN) { return $env:GITHUB_TOKEN }
+ }
+ }
+ return $null
+}
+
+# Validate GitHub tokens
+function Test-GitHubToken {
+ $hasAnyToken = $false
+ $hasModelsToken = $false
+
+ if ($env:GITHUB_APM_PAT -or $env:GITHUB_TOKEN) {
+ $hasAnyToken = $true
+ }
+
+ if ($env:GITHUB_TOKEN) {
+ $hasModelsToken = $true
+ }
+
+ if (-not $hasAnyToken) {
+ Write-Host "No GitHub tokens found" -ForegroundColor Red
+ Write-Host "Required: Set one of these environment variables:"
+ Write-Host " GITHUB_TOKEN (user-scoped PAT for GitHub Models)"
+ Write-Host " GITHUB_APM_PAT (fine-grained PAT for APM modules)"
+ return $false
+ }
+
+ if (-not $hasModelsToken) {
+ Write-Host "Warning: No user-scoped PAT found. GitHub Models API may not work with fine-grained PATs." -ForegroundColor Yellow
+ Write-Host "For full functionality, set GITHUB_TOKEN to a user-scoped PAT."
+ return $false
+ }
+
+ Write-Host "GitHub token validation passed" -ForegroundColor Green
+ return $true
+}
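+
+# Example (sketch): gate a CI step on token validity:
+#   if (-not (Test-GitHubToken)) { exit 1 }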
diff --git a/scripts/windows/test-dependency-integration.ps1 b/scripts/windows/test-dependency-integration.ps1
new file mode 100644
index 000000000..81ad84612
--- /dev/null
+++ b/scripts/windows/test-dependency-integration.ps1
@@ -0,0 +1,350 @@
+# Extension to build isolation script for APM Dependencies Integration Testing
+# Tests real dependency scenarios with actual GitHub repositories
+# Used in CI pipeline for comprehensive dependency validation
+
+$ErrorActionPreference = "Continue"
+
+# --- Logging functions ---
+
+function Write-DepInfo {
+ param([string]$Message)
+ Write-Host "i $Message" -ForegroundColor Blue
+}
+
+function Write-DepSuccess {
+ param([string]$Message)
+ Write-Host "OK $Message" -ForegroundColor Green
+}
+
+function Write-DepError {
+ param([string]$Message)
+ Write-Host "FAIL $Message" -ForegroundColor Red
+}
+
+function Write-DepTestHeader {
+ param([string]$Message)
+ Write-Host "TEST $Message" -ForegroundColor Yellow
+}
+
+# --- Test real dependency installation ---
+
+function Test-RealDependencyInstallation {
+ param(
+ [string]$TestDir,
+ [string]$ApmBinary
+ )
+
+ Write-DepTestHeader "Testing real dependency installation with microsoft/apm-sample-package"
+
+ Push-Location $TestDir
+ try {
+ # Create apm.yml with real dependency
+ @"
+name: dependency-test-project
+version: 1.0.0
+description: Test project for dependency integration testing
+author: CI Test
+
+dependencies:
+ apm:
+ - microsoft/apm-sample-package
+
+scripts:
+ start: "echo 'Project with apm-sample-package dependency loaded'"
+"@ | Set-Content -Path "apm.yml" -Encoding UTF8
+
+ # Test apm deps list (should show no dependencies initially)
+ Write-DepInfo "Testing 'apm deps list' with no dependencies installed"
+ $depsOutput = & $ApmBinary deps list 2>&1 | Out-String
+ Write-Host "DEBUG: Actual output from 'apm deps list':"
+ Write-Host "--- OUTPUT START ---"
+ Write-Host $depsOutput
+ Write-Host "--- OUTPUT END ---"
+ if ($depsOutput -match "No APM dependencies installed yet") {
+ Write-DepSuccess "Correctly shows no dependencies installed"
+ } else {
+ Write-DepError "Expected 'No APM dependencies installed yet' message"
+ Write-DepError "Got: $depsOutput"
+ return $false
+ }
+
+ # Test apm install (should download real dependency)
+ Write-DepInfo "Testing 'apm install' with real GitHub dependency"
+ & $ApmBinary install
+ if ($LASTEXITCODE -ne 0) {
+ Write-DepError "Failed to install real dependency"
+ return $false
+ }
+
+ # Verify installation
+ if (-not (Test-Path "apm_modules\microsoft\apm-sample-package")) {
+ Write-DepError "Dependency not installed: apm_modules\microsoft\apm-sample-package not found"
+ return $false
+ }
+
+ # Verify dependency structure
+ if (-not (Test-Path "apm_modules\microsoft\apm-sample-package\apm.yml")) {
+ Write-DepError "Dependency missing apm.yml"
+ return $false
+ }
+
+ if (-not (Test-Path "apm_modules\microsoft\apm-sample-package\.apm")) {
+ Write-DepError "Dependency missing .apm directory"
+ return $false
+ }
+
+ # Check for expected prompt files
+ if (-not (Test-Path "apm_modules\microsoft\apm-sample-package\.apm\prompts\design-review.prompt.md")) {
+ Write-DepError "Dependency missing expected prompt file: .apm\prompts\design-review.prompt.md"
+ return $false
+ }
+
+ Write-DepSuccess "Real dependency installation verified"
+
+ # Test apm deps list (should now show installed dependency)
+ Write-DepInfo "Testing 'apm deps list' with installed dependency"
+ $depsOutput = & $ApmBinary deps list 2>&1 | Out-String
+ if ($depsOutput -match "apm-sample-package") {
+ Write-DepSuccess "Correctly shows installed dependency"
+ } else {
+ Write-DepError "Expected to see installed dependency in list"
+ return $false
+ }
+
+ # Test apm deps tree
+ Write-DepInfo "Testing 'apm deps tree'"
+ $treeOutput = & $ApmBinary deps tree 2>&1 | Out-String
+ if ($treeOutput -match "apm-sample-package") {
+ Write-DepSuccess "Dependency tree shows installed dependency"
+ } else {
+ Write-DepError "Expected to see dependency in tree output"
+ return $false
+ }
+
+ # Test apm deps info
+ Write-DepInfo "Testing 'apm deps info apm-sample-package'"
+ $infoOutput = & $ApmBinary deps info apm-sample-package 2>&1 | Out-String
+ if ($infoOutput -match "apm-sample-package") {
+ Write-DepSuccess "Dependency info command works"
+ } else {
+ Write-DepError "Expected dependency info to show package details"
+ return $false
+ }
+
+ Write-DepSuccess "All real dependency tests passed"
+ return $true
+ } finally {
+ Pop-Location
+ }
+}
+
+# --- Test multi-dependency scenario ---
+
+function Test-MultiDependencyScenario {
+ param(
+ [string]$TestDir,
+ [string]$ApmBinary
+ )
+
+ Write-DepTestHeader "Testing multi-dependency scenario with both test repositories"
+
+ Push-Location $TestDir
+ try {
+ # Create apm.yml with multiple dependencies
+ @"
+name: multi-dependency-test
+version: 1.0.0
+description: Test project for multi-dependency scenario
+author: CI Test
+
+dependencies:
+ apm:
+ - microsoft/apm-sample-package
+ - github/awesome-copilot/skills/review-and-refactor
+
+scripts:
+ start: "echo 'Project with multiple dependencies loaded'"
+"@ | Set-Content -Path "apm.yml" -Encoding UTF8
+
+ # Clean any existing dependencies
+ if (Test-Path "apm_modules") {
+ Remove-Item -Recurse -Force "apm_modules" -ErrorAction SilentlyContinue
+ }
+
+ # Install multiple dependencies
+ Write-DepInfo "Installing multiple real dependencies"
+ & $ApmBinary install
+ if ($LASTEXITCODE -ne 0) {
+ Write-DepError "Failed to install multiple dependencies"
+ return $false
+ }
+
+ # Verify both dependencies installed
+ if (-not (Test-Path "apm_modules\microsoft\apm-sample-package")) {
+ Write-DepError "First dependency not installed: apm-sample-package"
+ return $false
+ }
+
+ if (-not (Test-Path "apm_modules\github\awesome-copilot\skills\review-and-refactor")) {
+ Write-DepError "Second dependency not installed: github/awesome-copilot/skills/review-and-refactor"
+ return $false
+ }
+
+ # Test deps list shows both
+ $depsOutput = & $ApmBinary deps list 2>&1 | Out-String
+ if ($depsOutput -notmatch "apm-sample-package") {
+ Write-DepError "Multi-dependency list missing apm-sample-package"
+ return $false
+ }
+
+ if ($depsOutput -notmatch "design-guidelines|apm-sample-package") {
+ Write-DepError "Multi-dependency list missing design-guidelines"
+ return $false
+ }
+
+ Write-DepSuccess "Multi-dependency scenario verified"
+ return $true
+ } finally {
+ Pop-Location
+ }
+}
+
+# --- Test dependency update workflow ---
+
+function Test-DependencyUpdate {
+ param(
+ [string]$TestDir,
+ [string]$ApmBinary
+ )
+
+ Write-DepTestHeader "Testing dependency update workflow"
+
+ Push-Location $TestDir
+ try {
+ # Should have dependencies installed from previous test
+ if (-not (Test-Path "apm_modules")) {
+ Write-DepError "No dependencies found for update test"
+ return $false
+ }
+
+ # Test update all dependencies
+ Write-DepInfo "Testing 'apm deps update' for all dependencies"
+ & $ApmBinary deps update
+ if ($LASTEXITCODE -ne 0) {
+ Write-DepError "Failed to update all dependencies"
+ return $false
+ }
+
+ # Test update specific dependency
+ Write-DepInfo "Testing 'apm deps update apm-sample-package'"
+ & $ApmBinary deps update apm-sample-package
+ if ($LASTEXITCODE -ne 0) {
+ Write-DepError "Failed to update specific dependency"
+ return $false
+ }
+
+ Write-DepSuccess "Dependency update workflow verified"
+ return $true
+ } finally {
+ Pop-Location
+ }
+}
+
+# --- Test dependency cleanup ---
+
+function Test-DependencyCleanup {
+ param(
+ [string]$TestDir,
+ [string]$ApmBinary
+ )
+
+ Write-DepTestHeader "Testing dependency cleanup"
+
+ Push-Location $TestDir
+ try {
+ # Test deps clean
+ Write-DepInfo "Testing 'apm deps clean'"
+ "y" | & $ApmBinary deps clean
+ if ($LASTEXITCODE -ne 0) {
+ Write-DepError "Failed to clean dependencies"
+ return $false
+ }
+
+ # Verify cleanup
+ if (Test-Path "apm_modules") {
+ Write-DepError "apm_modules directory still exists after cleanup"
+ return $false
+ }
+
+ # Verify deps list shows no dependencies
+ $depsOutput = & $ApmBinary deps list 2>&1 | Out-String
+ Write-Host "DEBUG: Actual output from 'apm deps list' after cleanup:"
+ Write-Host "--- OUTPUT START ---"
+ Write-Host $depsOutput
+ Write-Host "--- OUTPUT END ---"
+ if ($depsOutput -match "No APM dependencies installed yet") {
+ Write-DepSuccess "Correctly shows no dependencies after cleanup"
+ } else {
+ Write-DepError "Expected no dependencies after cleanup"
+ Write-DepError "Got: $depsOutput"
+ return $false
+ }
+
+ Write-DepSuccess "Dependency cleanup verified"
+ return $true
+ } finally {
+ Pop-Location
+ }
+}
+
+# --- Main function for dependency integration testing ---
+
+function Test-DependencyIntegration {
+ param(
+ [Parameter(Mandatory)]
+ [string]$BinaryPath
+ )
+
+ Write-DepInfo "=== APM Dependencies Integration Testing ==="
+ Write-DepInfo "Testing with real GitHub repositories:"
+ Write-DepInfo " - microsoft/apm-sample-package"
+ Write-DepInfo " - github/awesome-copilot/skills/review-and-refactor"
+
+ # Create isolated test directory
+ $testDir = Join-Path $env:TEMP "apm-dep-test-$PID"
+ New-Item -ItemType Directory -Path $testDir -Force | Out-Null
+
+ # Check for GitHub token
+ if (-not $env:GITHUB_CLI_PAT -and -not $env:GITHUB_TOKEN) {
+ Write-DepError "GitHub token required for dependency testing"
+ Write-DepInfo "Set GITHUB_CLI_PAT or GITHUB_TOKEN environment variable"
+ return $false
+ }
+
+ try {
+ # Run dependency tests in sequence
+ if (-not (Test-RealDependencyInstallation -TestDir $testDir -ApmBinary $BinaryPath)) { return $false }
+ if (-not (Test-MultiDependencyScenario -TestDir $testDir -ApmBinary $BinaryPath)) { return $false }
+ if (-not (Test-DependencyUpdate -TestDir $testDir -ApmBinary $BinaryPath)) { return $false }
+ if (-not (Test-DependencyCleanup -TestDir $testDir -ApmBinary $BinaryPath)) { return $false }
+
+ Write-DepSuccess "=== All dependency integration tests passed! ==="
+ return $true
+ } finally {
+ # Cleanup
+ if (Test-Path $testDir) {
+ Remove-Item -Recurse -Force $testDir -ErrorAction SilentlyContinue
+ }
+ }
+}
+
+# If run directly (not dot-sourced)
+if ($MyInvocation.InvocationName -ne ".") {
+ if ($args.Count -lt 1) {
+ Write-DepError "Usage: .\test-dependency-integration.ps1 "
+ exit 1
+ }
+
+ $result = Test-DependencyIntegration -BinaryPath $args[0]
+ if (-not $result) { exit 1 }
+}
diff --git a/scripts/windows/test-integration.ps1 b/scripts/windows/test-integration.ps1
new file mode 100644
index 000000000..b14d267d5
--- /dev/null
+++ b/scripts/windows/test-integration.ps1
@@ -0,0 +1,253 @@
+# Integration testing script for Windows CI and local environments
+# PowerShell equivalent of test-integration.sh
+#
+# Tests comprehensive runtime scenarios and edge cases:
+# - pytest-based E2E scenarios with error handling
+# - Hero scenario validation (zero-config, guardrailing)
+# - MCP registry integration
+# - APM Dependencies with real repositories
+#
+# - CI mode: Uses pre-built artifacts from build job
+# - Local mode: Builds binary, runs integration tests
+
+param(
+ [switch]$SkipBuild,
+ [switch]$SkipRuntimes
+)
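+
+# Example (sketch): local run that reuses an existing binary and skips runtime setup:
+#   .\scripts\windows\test-integration.ps1 -SkipBuild -SkipRuntimes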
+
+$ErrorActionPreference = "Stop"
+
+# Source the GitHub token management helper
+$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+$tokenHelper = Join-Path $ScriptDir "github-token-helper.ps1"
+if (Test-Path $tokenHelper) {
+ . $tokenHelper
+}
+
+#region Logging
+function Write-Info { param([string]$Message) Write-Host "[INFO] $Message" -ForegroundColor Blue }
+function Write-Success { param([string]$Message) Write-Host "[OK] $Message" -ForegroundColor Green }
+function Write-ErrorText { param([string]$Message) Write-Host "[ERROR] $Message" -ForegroundColor Red }
+#endregion
+
+#region Prerequisites
+function Test-Prerequisites {
+ Write-Info "Checking prerequisites..."
+
+ if (Get-Command Initialize-GitHubToken -ErrorAction SilentlyContinue) {
+ Initialize-GitHubToken
+ Write-Success "GitHub tokens configured"
+ }
+
+ if ($env:GITHUB_APM_PAT) { Write-Success "GITHUB_APM_PAT is set (APM module access)" }
+ if ($env:GITHUB_TOKEN) { Write-Success "GITHUB_TOKEN is set (GitHub Models access)" }
+}
+#endregion
+
+#region Platform and Environment Detection
+function Get-BinaryName {
+ $arch = [System.Runtime.InteropServices.RuntimeInformation]::ProcessArchitecture
+ switch ($arch) {
+ "X64" { return "apm-windows-x86_64" }
+ "Arm64" { return "apm-windows-x86_64" } # x86_64 emulation on ARM64
+ default {
+ Write-ErrorText "Unsupported architecture: $arch"
+ exit 1
+ }
+ }
+}
+
+function Find-ExistingBinary {
+ param([string]$BinaryName)
+
+ $binaryPath = Join-Path "." "dist" $BinaryName "apm.exe"
+ if (Test-Path $binaryPath) {
+ Write-Info "Found existing binary: $binaryPath (CI mode)"
+ return $true
+ }
+
+ # Also check the flat layout from actions/download-artifact (artifact dir at repo root, no dist\ prefix)
+ $flatPath = Join-Path "." $BinaryName "apm.exe"
+ if (Test-Path $flatPath) {
+ Write-Info "Found existing binary: $flatPath (CI mode)"
+ return $true
+ }
+
+ Write-Info "No existing binary found, will build locally"
+ return $false
+}
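+
+# Note: Find-ExistingBinary returns only $true/$false; Initialize-BinaryForTesting
+# re-resolves the concrete path, checking dist\ first and then the flat layout.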
+#endregion
+
+#region Binary Build and Setup
+function Build-Binary {
+ param([string]$BinaryName)
+
+ Write-Info "=== Building APM binary (local mode) ==="
+
+ Write-Info "Installing build dependencies..."
+ uv sync --extra dev --extra build
+
+ Write-Info "Building binary with PyInstaller..."
+ uv run pyinstaller build/apm.spec --noconfirm
+
+ $binaryPath = Join-Path "." "dist" $BinaryName "apm.exe"
+ if (-not (Test-Path $binaryPath)) {
+ Write-ErrorText "Binary not found after build: $binaryPath"
+ exit 1
+ }
+
+ Write-Success "Binary built: $binaryPath"
+}
+
+function Initialize-BinaryForTesting {
+ param([string]$BinaryName)
+
+ Write-Info "=== Setting up binary for testing ==="
+
+ $binaryDir = Join-Path (Get-Location) "dist" $BinaryName
+ if (-not (Test-Path (Join-Path $binaryDir "apm.exe"))) {
+ # Check flat layout from download-artifact
+ $binaryDir = Join-Path (Get-Location) $BinaryName
+ }
+
+ if (-not (Test-Path (Join-Path $binaryDir "apm.exe"))) {
+ Write-ErrorText "Cannot find apm.exe in $binaryDir"
+ exit 1
+ }
+
+ # Add binary directory to PATH for this session
+ $env:PATH = "$binaryDir;$env:PATH"
+
+ # Verify setup
+ $apmPath = Get-Command apm -ErrorAction SilentlyContinue
+ if (-not $apmPath) {
+ Write-ErrorText "APM not found in PATH after setup"
+ exit 1
+ }
+
+ $version = & apm --version 2>&1
+ Write-Success "APM binary ready for testing: $version"
+}
+#endregion
+
+#region Runtime Setup
+function Initialize-Runtimes {
+ Write-Info "=== Setting up runtimes for integration tests ==="
+
+ Write-Info "Setting up GitHub Copilot CLI runtime..."
+ & apm runtime setup copilot
+ if ($LASTEXITCODE -ne 0) { Write-ErrorText "Failed to set up Copilot runtime"; exit 1 }
+
+ Write-Info "Setting up Codex runtime..."
+ & apm runtime setup codex
+ if ($LASTEXITCODE -ne 0) { Write-ErrorText "Failed to set up Codex runtime"; exit 1 }
+
+ Write-Info "Setting up LLM runtime..."
+ & apm runtime setup llm
+ if ($LASTEXITCODE -ne 0) { Write-ErrorText "Failed to set up LLM runtime"; exit 1 }
+
+ # Add runtime paths to session
+ $runtimeDir = Join-Path $env:USERPROFILE ".apm" "runtimes"
+ $env:PATH = "$runtimeDir;$env:PATH"
+
+ Write-Success "All runtimes configured (Copilot, Codex, LLM)"
+}
+#endregion
+
+#region Integration Tests
+function Invoke-IntegrationTests {
+ Write-Info "=== Running integration tests (mirroring CI) ==="
+ Write-Info "Testing comprehensive runtime scenarios:"
+ Write-Info " - Zero-config auto-install (Hero Scenario 1)"
+ Write-Info " - 2-minute guardrailing (Hero Scenario 2)"
+ Write-Info " - MCP registry integration"
+ Write-Info " - APM Dependencies with real repositories"
+
+ $env:APM_E2E_TESTS = "1"
+
+ Write-Info "Environment:"
+ Write-Host " APM_E2E_TESTS: $env:APM_E2E_TESTS"
+ Write-Host " GITHUB_TOKEN: $(if ($env:GITHUB_TOKEN) { '(set)' } else { '(not set)' })"
+ Write-Host " GITHUB_APM_PAT: $(if ($env:GITHUB_APM_PAT) { '(set)' } else { '(not set)' })"
+ Write-Host " ADO_APM_PAT: $(if ($env:ADO_APM_PAT) { '(set)' } else { '(not set)' })"
+
+ # Hero Scenario 1: Zero-config auto-install
+ Write-Info "Running HERO SCENARIO 1: Zero-config auto-install test..."
+ pytest tests/integration/test_auto_install_e2e.py -v -s --tb=short
+ if ($LASTEXITCODE -ne 0) {
+ Write-ErrorText "Zero-config auto-install tests failed!"
+ exit 1
+ }
+ Write-Success "Zero-config auto-install tests passed!"
+
+ # Hero Scenario 2: 2-minute guardrailing
+ Write-Info "Running HERO SCENARIO 2: 2-minute guardrailing test..."
+ pytest tests/integration/test_guardrailing_hero_e2e.py -v -s --tb=short
+ if ($LASTEXITCODE -ne 0) {
+ Write-ErrorText "2-minute guardrailing tests failed!"
+ exit 1
+ }
+ Write-Success "2-minute guardrailing tests passed!"
+
+ # MCP registry E2E tests
+ Write-Info "Running MCP registry E2E tests..."
+ pytest tests/integration/test_mcp_registry_e2e.py -v -s --tb=short
+ if ($LASTEXITCODE -ne 0) {
+ Write-ErrorText "MCP registry tests failed!"
+ exit 1
+ }
+ Write-Success "MCP registry tests passed!"
+
+ # APM Dependencies integration tests
+ Write-Info "Running APM Dependencies integration tests..."
+ pytest tests/integration/test_apm_dependencies.py -v -s --tb=short -m integration
+ if ($LASTEXITCODE -ne 0) {
+ Write-ErrorText "APM Dependencies integration tests failed!"
+ exit 1
+ }
+ Write-Success "APM Dependencies integration tests passed!"
+
+ # Azure DevOps E2E tests (conditional)
+ if ($env:ADO_APM_PAT) {
+ Write-Info "Running Azure DevOps E2E tests..."
+ pytest tests/integration/test_ado_e2e.py -v -s --tb=short
+ if ($LASTEXITCODE -ne 0) {
+ Write-ErrorText "Azure DevOps E2E tests failed!"
+ exit 1
+ }
+ Write-Success "Azure DevOps E2E tests passed!"
+ } else {
+ Write-Info "Skipping Azure DevOps E2E tests (ADO_APM_PAT not set)"
+ }
+
+ Write-Success "All integration test suites completed successfully!"
+}
+#endregion
+
+#region Main
+Write-Host "APM CLI Integration Testing - Windows" -ForegroundColor Cyan
+Write-Host "======================================" -ForegroundColor Cyan
+Write-Host ""
+
+Test-Prerequisites
+
+$binaryName = Get-BinaryName
+$hasExisting = Find-ExistingBinary -BinaryName $binaryName
+
+if (-not $hasExisting -and -not $SkipBuild) {
+ Build-Binary -BinaryName $binaryName
+} elseif (-not $hasExisting -and $SkipBuild) {
+ Write-ErrorText "No binary found and -SkipBuild specified"
+ exit 1
+}
+
+Initialize-BinaryForTesting -BinaryName $binaryName
+
+if (-not $SkipRuntimes) {
+ Initialize-Runtimes
+}
+
+Invoke-IntegrationTests
+
+Write-Success "All integration tests completed successfully!"
+#endregion
diff --git a/scripts/windows/test-release-validation.ps1 b/scripts/windows/test-release-validation.ps1
new file mode 100644
index 000000000..48b613234
--- /dev/null
+++ b/scripts/windows/test-release-validation.ps1
@@ -0,0 +1,499 @@
+# Release validation script - Final pre-release testing (PowerShell)
+# Tests the EXACT user experience with the shipped binary in complete isolation:
+# 1. Download/extract binary (as users would)
+# 2. apm runtime setup codex
+# 3. apm init my-ai-native-project
+# 4. cd my-ai-native-project && apm compile
+# 5. apm install
+# 6. apm run start --param name=""
+#
+# Environment: Complete isolation - NO source code, only the binary
+# Purpose: Validate that end-users will have a successful experience
+# This is the final gate before release - testing the actual product as shipped
+
+param(
+ [string]$BinaryPath
+)
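+
+# Example (sketch, hypothetical artifact location): validate a freshly extracted binary:
+#   .\scripts\windows\test-release-validation.ps1 .\apm-windows-x86_64\apm.exe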
+
+$ErrorActionPreference = "Continue"
+
+# --- Logging functions ---
+
+function Write-Info {
+ param([string]$Message)
+ Write-Host "i $Message" -ForegroundColor Blue
+}
+
+function Write-Success {
+ param([string]$Message)
+ Write-Host "OK $Message" -ForegroundColor Green
+}
+
+function Write-ErrorText {
+ param([string]$Message)
+ Write-Host "FAIL $Message" -ForegroundColor Red
+}
+
+function Write-TestHeader {
+ param([string]$Message)
+ Write-Host "TEST $Message" -ForegroundColor Yellow
+}
+
+# --- Source helpers ---
+
+. "$PSScriptRoot\github-token-helper.ps1"
+
+$script:DEPENDENCY_TESTS_AVAILABLE = $false
+$depIntegrationScript = Join-Path $PSScriptRoot "test-dependency-integration.ps1"
+if (Test-Path $depIntegrationScript) {
+ . $depIntegrationScript
+ $script:DEPENDENCY_TESTS_AVAILABLE = $true
+}
+
+# --- Global state ---
+
+$script:BINARY_PATH = ""
+$script:testDir = ""
+
+# --- Helper: run with timeout ---
+
+function Invoke-WithTimeout {
+ param(
+ [int]$Seconds,
+ [string]$Command,
+ [string[]]$Arguments
+ )
+ $process = Start-Process -FilePath $Command -ArgumentList $Arguments -NoNewWindow -PassThru -RedirectStandardOutput "$env:TEMP\apm-timeout-stdout.txt" -RedirectStandardError "$env:TEMP\apm-timeout-stderr.txt"
+ if (-not $process.WaitForExit($Seconds * 1000)) {
+ $process.Kill()
+ if (Test-Path "$env:TEMP\apm-timeout-stdout.txt") { Get-Content "$env:TEMP\apm-timeout-stdout.txt" }
+ if (Test-Path "$env:TEMP\apm-timeout-stderr.txt") { Get-Content "$env:TEMP\apm-timeout-stderr.txt" }
+ return 124 # timeout code
+ }
+ if (Test-Path "$env:TEMP\apm-timeout-stdout.txt") { Get-Content "$env:TEMP\apm-timeout-stdout.txt" }
+ if (Test-Path "$env:TEMP\apm-timeout-stderr.txt") { Get-Content "$env:TEMP\apm-timeout-stderr.txt" }
+ return $process.ExitCode
+}
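+
+# Example (sketch): exit code 124 mirrors the Unix `timeout` command, so callers can
+# treat a timeout as "the prompt started executing" rather than a hard failure:
+#   $code = Invoke-WithTimeout -Seconds 15 -Command $script:BINARY_PATH -Arguments @("--help")
+#   if ($code -eq 124) { Write-Host "timed out as expected" }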
+
+# --- Find binary ---
+
+function Find-Binary {
+ param([string]$Path)
+
+ if ($Path) {
+ if (-not (Test-Path $Path)) {
+ Write-ErrorText "Binary not found at specified path: $Path"
+ exit 1
+ }
+ $script:BINARY_PATH = (Resolve-Path $Path).Path
+ } elseif (Test-Path ".\apm.exe") {
+ $script:BINARY_PATH = (Resolve-Path ".\apm.exe").Path
+ } else {
+ $cmd = Get-Command apm -ErrorAction SilentlyContinue
+ if ($cmd) {
+ $script:BINARY_PATH = $cmd.Source
+ } else {
+ Write-ErrorText "APM binary not found. Usage: .\test-release-validation.ps1 [path-to-binary]"
+ exit 1
+ }
+ }
+
+ Write-Info "Testing binary: $script:BINARY_PATH"
+}
+
+# --- Prerequisites ---
+
+function Test-Prerequisite {
+ Write-TestHeader "Prerequisites: GitHub token"
+
+ Initialize-GitHubToken
+ # Initialize-GitHubToken doesn't return failure — check tokens after setup
+ if ($env:GITHUB_TOKEN -or $env:GITHUB_APM_PAT) {
+ Write-Success "GitHub tokens configured successfully"
+
+ if ($env:GITHUB_APM_PAT) {
+ Write-Success "GITHUB_APM_PAT is set (APM module access)"
+ }
+ if ($env:GITHUB_TOKEN) {
+ Write-Success "GITHUB_TOKEN is set (GitHub Models access)"
+ }
+ return $true
+ } else {
+ Write-ErrorText "GitHub token setup failed"
+ return $false
+ }
+}
+
+# --- Test: basic commands ---
+
+function Test-BasicCommand {
+ Write-TestHeader "Sanity check: Basic commands"
+
+ # Test --version
+ Write-Host "Running: $script:BINARY_PATH --version"
+ Write-Host "--- Command Output Start ---"
+ $result = & $script:BINARY_PATH --version 2>&1
+ $versionExitCode = $LASTEXITCODE
+ $result | Out-Host
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $versionExitCode"
+
+ if ($versionExitCode -ne 0) {
+ Write-ErrorText "apm --version failed with exit code $versionExitCode"
+ return $false
+ }
+
+ # Test --help
+ Write-Host "Running: $script:BINARY_PATH --help"
+ Write-Host "--- Command Output Start ---"
+ $result = & $script:BINARY_PATH --help 2>&1
+ $helpExitCode = $LASTEXITCODE
+ $result | Select-Object -First 20 | Out-Host
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $helpExitCode"
+
+ if ($helpExitCode -ne 0) {
+ Write-ErrorText "apm --help failed with exit code $helpExitCode"
+ return $false
+ }
+
+ Write-Success "Basic commands work"
+ return $true
+}
+
+# --- Test: runtime setup ---
+
+function Test-RuntimeSetup {
+ Write-TestHeader "README Step 2: apm runtime setup"
+
+ # Install GitHub Copilot CLI
+ Write-Host "Running: $script:BINARY_PATH runtime setup copilot"
+ Write-Host "--- Command Output Start ---"
+ $result = & $script:BINARY_PATH runtime setup copilot 2>&1
+ $exitCode = $LASTEXITCODE
+ $result | Out-Host
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $exitCode"
+
+ if ($exitCode -ne 0) {
+ Write-ErrorText "apm runtime setup copilot failed with exit code $exitCode"
+ return $false
+ }
+
+ Write-Success "Copilot CLI runtime setup completed"
+
+ # Also install Codex CLI
+ Write-Host "Running: $script:BINARY_PATH runtime setup codex"
+ Write-Host "--- Command Output Start ---"
+ $result = & $script:BINARY_PATH runtime setup codex 2>&1
+ $exitCode = $LASTEXITCODE
+ $result | Out-Host
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $exitCode"
+
+ if ($exitCode -ne 0) {
+ Write-ErrorText "apm runtime setup codex failed with exit code $exitCode"
+ return $false
+ }
+
+ Write-Success "Codex CLI runtime setup completed"
+ Write-Success "Both runtimes (Copilot, Codex) configured successfully"
+ return $true
+}
+
+# --- HERO SCENARIO 1: 30-Second Zero-Config ---
+
+function Test-HeroZeroConfig {
+ Write-TestHeader "HERO SCENARIO 1: 30-Second Zero-Config (README lines 35-44)"
+
+ # Create temporary directory for this test
+ New-Item -ItemType Directory -Path "zero-config-test" -Force | Out-Null
+ Push-Location "zero-config-test"
+
+ try {
+ # Runtime setup is already done in Test-RuntimeSetup
+ # Just test the virtual package run
+ Write-Host "Running: $script:BINARY_PATH run github/awesome-copilot/skills/architecture-blueprint-generator (with 15s timeout)"
+ Write-Host "--- Command Output Start ---"
+ $exitCode = Invoke-WithTimeout -Seconds 15 -Command $script:BINARY_PATH -Arguments @("run", "github/awesome-copilot/skills/architecture-blueprint-generator")
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $exitCode"
+
+ if ($exitCode -eq 124) {
+ # Timeout is expected and OK (prompt execution started)
+ Write-Success "Zero-config auto-install worked! Package installed and prompt started."
+ } elseif ($exitCode -eq 0) {
+ Write-Success "Zero-config auto-install completed successfully"
+ } else {
+ Write-ErrorText "Zero-config auto-install failed immediately with exit code $exitCode"
+ return $false
+ }
+
+ # Verify package was actually installed
+ if (-not (Test-Path "apm_modules\github\awesome-copilot\skills\architecture-blueprint-generator")) {
+ Write-ErrorText "Package was not installed by auto-install"
+ return $false
+ }
+
+ Write-Success "Package auto-installed to apm_modules/"
+
+ # Test second run (should use cached package, no re-download)
+ Write-Host "Testing second run (should use cache)..."
+ $secondExitCode = Invoke-WithTimeout -Seconds 10 -Command $script:BINARY_PATH -Arguments @("run", "github/awesome-copilot/skills/architecture-blueprint-generator")
+
+ if ($secondExitCode -eq 124 -or $secondExitCode -eq 0) {
+ Write-Success "Second run used cached package (fast, no re-download)"
+ }
+
+ Write-Success "HERO SCENARIO 1: 30-second zero-config PASSED"
+ return $true
+ } finally {
+ Pop-Location
+ }
+}
+
+# --- HERO SCENARIO 2: 2-Minute Guardrailing ---
+
+function Test-HeroGuardrailing {
+ Write-TestHeader "HERO SCENARIO 2: 2-Minute Guardrailing (README lines 46-60)"
+
+ # Step 1: apm init my-project
+ Write-Host "Running: $script:BINARY_PATH init my-project --yes"
+ Write-Host "--- Command Output Start ---"
+ $result = & $script:BINARY_PATH init my-project --yes 2>&1
+ $exitCode = $LASTEXITCODE
+ $result | Out-Host
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $exitCode"
+
+ if ($exitCode -ne 0) {
+ Write-ErrorText "apm init my-project failed with exit code $exitCode"
+ return $false
+ }
+
+ if (-not (Test-Path "my-project") -or -not (Test-Path "my-project\apm.yml")) {
+ Write-ErrorText "my-project directory or apm.yml not created"
+ return $false
+ }
+
+ Write-Success "Project initialized"
+
+ Push-Location "my-project"
+
+ try {
+ # Step 2: apm install microsoft/apm-sample-package
+ Write-Host "Running: $script:BINARY_PATH install microsoft/apm-sample-package"
+ Write-Host "--- Command Output Start ---"
+ $result = & $script:BINARY_PATH install microsoft/apm-sample-package 2>&1
+ $exitCode = $LASTEXITCODE
+ $result | Out-Host
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $exitCode"
+
+ if ($exitCode -ne 0) {
+ Write-ErrorText "apm install microsoft/apm-sample-package failed"
+ return $false
+ }
+
+ Write-Success "design-guidelines installed"
+
+ # Step 3: apm install github/awesome-copilot/skills/review-and-refactor
+ Write-Host "Running: $script:BINARY_PATH install github/awesome-copilot/skills/review-and-refactor"
+ Write-Host "--- Command Output Start ---"
+ $result = & $script:BINARY_PATH install github/awesome-copilot/skills/review-and-refactor 2>&1
+ $exitCode = $LASTEXITCODE
+ $result | Out-Host
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $exitCode"
+
+ if ($exitCode -ne 0) {
+ Write-ErrorText "apm install github/awesome-copilot/skills/review-and-refactor failed"
+ return $false
+ }
+
+ Write-Success "virtual package installed"
+
+ # Step 4: apm compile
+ Write-Host "Running: $script:BINARY_PATH compile"
+ Write-Host "--- Command Output Start ---"
+ $result = & $script:BINARY_PATH compile 2>&1
+ $exitCode = $LASTEXITCODE
+ $result | Out-Host
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $exitCode"
+
+ if ($exitCode -ne 0) {
+ Write-ErrorText "apm compile failed"
+ return $false
+ }
+
+ if (-not (Test-Path "AGENTS.md")) {
+ Write-ErrorText "AGENTS.md not created by compile"
+ return $false
+ }
+
+ Write-Success "Compiled to AGENTS.md (guardrails active)"
+
+ # Step 5: apm run design-review (from installed package)
+ Write-Host "Running: $script:BINARY_PATH run design-review (with 10s timeout)"
+ Write-Host "--- Command Output Start ---"
+ $exitCode = Invoke-WithTimeout -Seconds 10 -Command $script:BINARY_PATH -Arguments @("run", "design-review")
+ Write-Host "--- Command Output End ---"
+ Write-Host "Exit code: $exitCode"
+
+ if ($exitCode -eq 124) {
+ # Timeout is expected and OK - prompt started executing
+ Write-Success "design-review prompt executed with compiled guardrails"
+ } elseif ($exitCode -eq 0) {
+ Write-Success "design-review completed successfully"
+ } else {
+ Write-ErrorText "apm run design-review failed immediately"
+ return $false
+ }
+
+ Write-Success "HERO SCENARIO 2: 2-minute guardrailing PASSED"
+ return $true
+ } finally {
+ Pop-Location
+ }
+}
+
+# --- Main ---
+
+function Main {
+ Write-Host "APM CLI Release Validation - Binary Isolation Testing"
+ Write-Host "====================================================="
+ Write-Host ""
+ Write-Host "Testing the EXACT user experience with the shipped binary"
+ Write-Host "Environment: Complete isolation (no source code access)"
+ Write-Host "Purpose: Final validation before release"
+ Write-Host ""
+
+ Find-Binary -Path $BinaryPath
+
+ # Test binary accessibility first
+ Write-Host "Testing binary accessibility..."
+ if (-not (Test-Path $script:BINARY_PATH)) {
+ Write-ErrorText "Binary file does not exist: $script:BINARY_PATH"
+ exit 1
+ }
+
+ Write-Host "Binary found: $script:BINARY_PATH"
+
+ $testsPassed = 0
+ $testsTotal = 5 # Prerequisites, basic commands, runtime setup, 2 hero scenarios
+ $dependencyTestsRun = $false
+
+ # Add dependency tests to total if available and GITHUB token is present
+ if ($script:DEPENDENCY_TESTS_AVAILABLE -and ($env:GITHUB_CLI_PAT -or $env:GITHUB_TOKEN)) {
+ $testsTotal++
+ $dependencyTestsRun = $true
+ Write-Info "Dependency integration tests will be included"
+ } elseif ($script:DEPENDENCY_TESTS_AVAILABLE) {
+ Write-Info "Dependency integration tests available but no GitHub token - skipping"
+ } else {
+ Write-Info "Dependency integration tests not available - skipping"
+ }
+
+ # Create isolated test directory
+ $script:testDir = "binary-golden-scenario-$PID"
+ New-Item -ItemType Directory -Path $script:testDir | Out-Null
+ Push-Location $script:testDir
+
+ try {
+ # Run prerequisites and basic tests
+ if (Test-Prerequisite) {
+ $testsPassed++
+ } else {
+ Write-ErrorText "Prerequisites check failed"
+ }
+
+ if (Test-BasicCommand) {
+ $testsPassed++
+ } else {
+ Write-ErrorText "Basic commands test failed"
+ }
+
+ if (Test-RuntimeSetup) {
+ $testsPassed++
+ } else {
+ Write-ErrorText "Runtime setup test failed"
+ }
+
+ # HERO SCENARIO 1: 30-second zero-config
+ if (Test-HeroZeroConfig) {
+ $testsPassed++
+ } else {
+ Write-ErrorText "Hero scenario 1 (30-sec zero-config) failed"
+ }
+
+ # HERO SCENARIO 2: 2-minute guardrailing
+ if (Test-HeroGuardrailing) {
+ $testsPassed++
+ } else {
+ Write-ErrorText "Hero scenario 2 (2-min guardrailing) failed"
+ }
+
+ # Run dependency integration tests if available and GitHub token is set
+ if ($dependencyTestsRun) {
+ Write-Info "Running dependency integration tests with real GitHub repositories"
+ if (Test-DependencyIntegration -BinaryPath $script:BINARY_PATH) {
+ $testsPassed++
+ Write-Success "Dependency integration tests passed"
+ } else {
+ Write-ErrorText "Dependency integration tests failed"
+ }
+ }
+ } finally {
+ Pop-Location
+ # Cleanup test directory
+ if ($script:testDir -and (Test-Path $script:testDir)) {
+ Write-Host "Cleaning up test directory: $script:testDir"
+ Remove-Item -Recurse -Force $script:testDir -ErrorAction SilentlyContinue
+ }
+ }
+
+ Write-Host ""
+ Write-Host "Results: $testsPassed/$testsTotal tests passed"
+
+ if ($testsPassed -eq $testsTotal) {
+ Write-Host "RELEASE VALIDATION PASSED!" -ForegroundColor Green
+ Write-Host ""
+ Write-Host "Binary is ready for production release"
+ Write-Host "End-user experience validated successfully"
+ Write-Host "Both README hero scenarios work perfectly"
+ Write-Host ""
+ Write-Host "Validated user journeys:"
+ Write-Host " 1. Prerequisites (GITHUB_TOKEN)"
+ Write-Host " 2. Binary accessibility"
+ Write-Host " 3. Runtime setup (copilot)"
+ Write-Host ""
+ Write-Host " HERO SCENARIO 1: 30-Second Zero-Config"
+ Write-Host " - Run virtual package directly"
+ Write-Host " - Auto-install on first run"
+ Write-Host " - Use cached package on second run"
+ Write-Host ""
+ Write-Host " HERO SCENARIO 2: 2-Minute Guardrailing"
+ Write-Host " - Project initialization"
+ Write-Host " - Install APM packages"
+ Write-Host " - Compile to AGENTS.md guardrails"
+ Write-Host " - Run prompts with guardrails"
+ if ($dependencyTestsRun) {
+ Write-Host ""
+ Write-Host " BONUS: Real dependency integration"
+ }
+ Write-Host ""
+ Write-Success "README Hero Scenarios work perfectly!"
+ Write-Host ""
+ Write-Host "The binary delivers the exact README experience - real users will love it!"
+ exit 0
+ } else {
+ Write-ErrorText "Some tests failed"
+ Write-Host ""
+ Write-Host "The binary doesn't match the README promise"
+ exit 1
+ }
+}
+
+# Run main function
+Main
diff --git a/src/apm_cli/adapters/client/codex.py b/src/apm_cli/adapters/client/codex.py
index 88b5e97e8..4f8b7fc06 100644
--- a/src/apm_cli/adapters/client/codex.py
+++ b/src/apm_cli/adapters/client/codex.py
@@ -122,7 +122,7 @@ def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_o
# If server has only remote endpoints and no packages, it's a remote-only server
if remotes and not packages:
- print(f"⚠️ Warning: MCP server '{server_url}' is a remote server (SSE type)")
+ print(f"[!] Warning: MCP server '{server_url}' is a remote server (SSE type)")
print(" Codex CLI only supports local servers with command/args configuration")
print(" Remote servers are not supported by Codex CLI")
print(" Skipping installation for Codex CLI")
@@ -174,7 +174,7 @@ def _format_server_config(self, server_info, env_overrides=None, runtime_vars=No
"id": server_info.get("id", "") # Add registry UUID for conflict detection
}
- # Self-defined stdio deps carry raw command/args — use directly
+ # Self-defined stdio deps carry raw command/args -- use directly
raw = server_info.get("_raw_stdio")
if raw:
config["command"] = raw["command"]
@@ -328,7 +328,7 @@ def _process_environment_variables(self, env_vars, env_overrides=None):
# Check for CI/automated environment via APM_E2E_TESTS flag (more reliable than TTY detection)
if os.getenv('APM_E2E_TESTS') == '1':
skip_prompting = True
- print(f"💡 APM_E2E_TESTS detected, will skip environment variable prompts")
+ print(f" APM_E2E_TESTS detected, will skip environment variable prompts")
# Also skip prompting if we're in a non-interactive environment (fallback)
is_interactive = sys.stdin.isatty() and sys.stdout.isatty()
diff --git a/src/apm_cli/adapters/client/copilot.py b/src/apm_cli/adapters/client/copilot.py
index 8cbcc73d3..77dfcae9d 100644
--- a/src/apm_cli/adapters/client/copilot.py
+++ b/src/apm_cli/adapters/client/copilot.py
@@ -166,7 +166,7 @@ def _format_server_config(self, server_info, env_overrides=None, runtime_vars=No
"id": server_info.get("id", "") # Add registry UUID for conflict detection
}
- # Self-defined stdio deps carry raw command/args — use directly
+ # Self-defined stdio deps carry raw command/args -- use directly
raw = server_info.get("_raw_stdio")
if raw:
config["command"] = raw["command"]
@@ -331,7 +331,7 @@ def _resolve_environment_variables(self, env_vars, env_overrides=None):
# Check for CI/automated environment via APM_E2E_TESTS flag (more reliable than TTY detection)
if os.getenv('APM_E2E_TESTS') == '1':
skip_prompting = True
- print(f"💡 APM_E2E_TESTS detected, will skip environment variable prompts")
+ print(f" APM_E2E_TESTS detected, will skip environment variable prompts")
# Also skip prompting if we're in a non-interactive environment (fallback)
is_interactive = sys.stdin.isatty() and sys.stdout.isatty()
diff --git a/src/apm_cli/adapters/client/vscode.py b/src/apm_cli/adapters/client/vscode.py
index ad722ac65..91372cf13 100644
--- a/src/apm_cli/adapters/client/vscode.py
+++ b/src/apm_cli/adapters/client/vscode.py
@@ -187,7 +187,7 @@ def _format_server_config(self, server_info):
server_config = {}
input_vars = []
- # Self-defined stdio deps carry raw command/args — use directly
+ # Self-defined stdio deps carry raw command/args -- use directly
raw = server_info.get("_raw_stdio")
if raw:
server_config = {
diff --git a/src/apm_cli/bundle/lockfile_enrichment.py b/src/apm_cli/bundle/lockfile_enrichment.py
index 2adc42145..ded692c9b 100644
--- a/src/apm_cli/bundle/lockfile_enrichment.py
+++ b/src/apm_cli/bundle/lockfile_enrichment.py
@@ -12,7 +12,7 @@ def enrich_lockfile_for_pack(
) -> str:
"""Create an enriched copy of the lockfile YAML with a ``pack:`` section.
- Does NOT mutate the original *lockfile* object — serialises a copy and
+ Does NOT mutate the original *lockfile* object -- serialises a copy and
prepends the pack metadata.
Args:
diff --git a/src/apm_cli/bundle/packer.py b/src/apm_cli/bundle/packer.py
index 9e53a8a70..d14de3f18 100644
--- a/src/apm_cli/bundle/packer.py
+++ b/src/apm_cli/bundle/packer.py
@@ -1,4 +1,4 @@
-"""Bundle packer — creates self-contained APM bundles from the resolved dependency tree."""
+"""Bundle packer -- creates self-contained APM bundles from the resolved dependency tree."""
import shutil
import tarfile
@@ -49,8 +49,8 @@ def pack_bundle(
Args:
project_root: Root of the project containing ``apm.lock`` and ``apm.yml``.
output_dir: Directory where the bundle will be created.
- fmt: Bundle format — ``"apm"`` (default) or ``"plugin"``.
- target: Target filter — ``"vscode"``, ``"claude"``, ``"all"``, or *None*
+ fmt: Bundle format -- ``"apm"`` (default) or ``"plugin"``.
+ target: Target filter -- ``"vscode"``, ``"claude"``, ``"all"``, or *None*
(auto-detect from apm.yml / project structure).
archive: If *True*, produce a ``.tar.gz`` and remove the directory.
dry_run: If *True*, resolve the file list but write nothing to disk.
@@ -67,7 +67,7 @@ def pack_bundle(
lockfile = LockFile.read(lockfile_path)
if lockfile is None:
raise FileNotFoundError(
- "apm.lock not found — run 'apm install' first to resolve dependencies."
+ "apm.lock not found -- run 'apm install' first to resolve dependencies."
)
# 2. Read apm.yml for name / version / config target
@@ -88,7 +88,7 @@ def pack_bundle(
explicit_target=target,
config_target=config_target,
)
- # For packing purposes, "minimal" means nothing to pack — treat as "all"
+ # For packing purposes, "minimal" means nothing to pack -- treat as "all"
if effective_target == "minimal":
effective_target = "all"
@@ -126,7 +126,7 @@ def pack_bundle(
missing.append(rel_path)
if missing:
raise ValueError(
- f"The following deployed files are missing on disk — "
+ f"The following deployed files are missing on disk -- "
f"run 'apm install' to restore them:\n"
+ "\n".join(f" - {m}" for m in missing)
)
diff --git a/src/apm_cli/bundle/unpacker.py b/src/apm_cli/bundle/unpacker.py
index a851bd579..d71521a68 100644
--- a/src/apm_cli/bundle/unpacker.py
+++ b/src/apm_cli/bundle/unpacker.py
@@ -1,4 +1,4 @@
-"""Bundle unpacker — extracts and verifies APM bundles."""
+"""Bundle unpacker -- extracts and verifies APM bundles."""
import shutil
import sys
@@ -65,7 +65,7 @@ def unpack_bundle(
if sys.version_info >= (3, 12):
tar.extractall(temp_dir, filter="data")
else:
- tar.extractall(temp_dir) # noqa: S202 — manual checks above
+ tar.extractall(temp_dir) # noqa: S202 -- manual checks above
except Exception:
shutil.rmtree(temp_dir, ignore_errors=True)
raise
@@ -89,10 +89,10 @@ def unpack_bundle(
if lockfile is None:
if not lockfile_path.exists():
raise FileNotFoundError(
- "apm.lock not found in the bundle — the bundle may be incomplete."
+ "apm.lock not found in the bundle -- the bundle may be incomplete."
)
raise FileNotFoundError(
- "apm.lock in the bundle could not be parsed — the bundle may be corrupt."
+ "apm.lock in the bundle could not be parsed -- the bundle may be corrupt."
)
# Collect deployed_files per dependency and deduplicated global list
@@ -118,7 +118,7 @@ def unpack_bundle(
]
if missing:
raise ValueError(
- "Bundle verification failed — the following deployed files "
+ "Bundle verification failed -- the following deployed files "
"are missing from the bundle:\n"
+ "\n".join(f" - {m}" for m in missing)
)
@@ -142,7 +142,7 @@ def unpack_bundle(
for rel_path in unique_files:
# Guard against absolute paths or path-traversal entries in deployed_files
p = Path(rel_path)
- if p.is_absolute() or ".." in p.parts:
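+ # On Windows, Path("/foo").is_absolute() is False (no drive letter), so
+ # POSIX-style absolute paths are rejected explicitly as well.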
+ if p.is_absolute() or rel_path.startswith("/") or ".." in p.parts:
raise ValueError(
f"Refusing to unpack unsafe path from bundle lockfile: {rel_path!r}"
)
diff --git a/src/apm_cli/cli.py b/src/apm_cli/cli.py
index c15bdba69..d9352c601 100644
--- a/src/apm_cli/cli.py
+++ b/src/apm_cli/cli.py
@@ -1,6 +1,6 @@
"""Command-line interface for Agent Package Manager (APM).
-Thin wiring layer — all command logic lives in ``apm_cli.commands.*`` modules.
+Thin wiring layer -- all command logic lives in ``apm_cli.commands.*`` modules.
"""
import sys
diff --git a/src/apm_cli/commands/_helpers.py b/src/apm_cli/commands/_helpers.py
index ff1855cfd..578f27ec2 100644
--- a/src/apm_cli/commands/_helpers.py
+++ b/src/apm_cli/commands/_helpers.py
@@ -120,7 +120,7 @@ def _build_expected_install_paths(declared_deps, lockfile, apm_modules_dir: Path
install_path = dep.get_install_path(apm_modules_dir)
try:
relative_path = install_path.relative_to(apm_modules_dir)
- expected.add(str(relative_path))
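+ # as_posix() keeps the comparison separator-agnostic across Windows and Unix.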
+ expected.add(relative_path.as_posix())
except ValueError:
expected.add(str(install_path))
@@ -136,7 +136,7 @@ def _build_expected_install_paths(declared_deps, lockfile, apm_modules_dir: Path
install_path = dep_ref.get_install_path(apm_modules_dir)
try:
relative_path = install_path.relative_to(apm_modules_dir)
- expected.add(str(relative_path))
+ expected.add(relative_path.as_posix())
except ValueError:
pass
return expected
diff --git a/src/apm_cli/commands/compile.py b/src/apm_cli/commands/compile.py
index 3244f91a3..f7ac5b1e4 100644
--- a/src/apm_cli/commands/compile.py
+++ b/src/apm_cli/commands/compile.py
@@ -32,7 +32,7 @@ def _display_validation_errors(errors):
from rich.table import Table
error_table = Table(
- title="❌ Primitive Validation Errors",
+ title="[x] Primitive Validation Errors",
show_header=True,
header_style="bold red",
)
@@ -64,7 +64,7 @@ def _display_validation_errors(errors):
# Fallback to simple text output
_rich_error("Validation errors found:")
for error in errors:
- click.echo(f" ❌ {error}")
+ click.echo(f" [x] {error}")
def _get_validation_suggestion(error_msg):
@@ -142,7 +142,7 @@ def _recompile(self, changed_file):
else:
_rich_error("Recompilation failed")
for error in result.errors:
- click.echo(f" ❌ {error}")
+ click.echo(f" [x] {error}")
except Exception as e:
_rich_error(f"Error during recompilation: {e}")
@@ -187,7 +187,7 @@ def _recompile(self, changed_file):
# Start watching
observer.start()
_rich_info(
- f"👀 Watching for changes in: {', '.join(watch_paths)}", symbol="eyes"
+ f" Watching for changes in: {', '.join(watch_paths)}", symbol="eyes"
)
_rich_info("Press Ctrl+C to stop watching...", symbol="info")
@@ -217,7 +217,7 @@ def _recompile(self, changed_file):
else:
_rich_error("Initial compilation failed")
for error in result.errors:
- click.echo(f" ❌ {error}")
+ click.echo(f" [x] {error}")
try:
while True:
@@ -315,23 +315,23 @@ def compile(
Use --single-agents for traditional single-file compilation when needed.
Target platforms:
- • vscode/agents: Generates AGENTS.md + .github/ structure (VSCode/GitHub Copilot)
- • claude: Generates CLAUDE.md + .claude/ structure (Claude Code)
- • all: Generates both targets (default)
+ * vscode/agents: Generates AGENTS.md + .github/ structure (VSCode/GitHub Copilot)
+ * claude: Generates CLAUDE.md + .claude/ structure (Claude Code)
+ * all: Generates both targets (default)
Advanced options:
- • --dry-run: Preview compilation without writing files (shows placement decisions)
- • --verbose: Show detailed source attribution and optimizer analysis
- • --local-only: Ignore dependencies, compile only local .apm/ primitives
- • --clean: Remove orphaned AGENTS.md files that are no longer generated
+ * --dry-run: Preview compilation without writing files (shows placement decisions)
+ * --verbose: Show detailed source attribution and optimizer analysis
+ * --local-only: Ignore dependencies, compile only local .apm/ primitives
+ * --clean: Remove orphaned AGENTS.md files that are no longer generated
"""
try:
# Check if this is an APM project first
from pathlib import Path
if not Path("apm.yml").exists():
- _rich_error("❌ Not an APM project - no apm.yml found")
- _rich_info("💡 To initialize an APM project, run:")
+ _rich_error("[x] Not an APM project - no apm.yml found")
+ _rich_info(" To initialize an APM project, run:")
_rich_info(" apm init")
sys.exit(1)
@@ -362,13 +362,13 @@ def compile(
)
if has_empty_apm:
- _rich_error("❌ No instruction files found in .apm/ directory")
- _rich_info("💡 To add instructions, create files like:")
+ _rich_error("[x] No instruction files found in .apm/ directory")
+ _rich_info(" To add instructions, create files like:")
_rich_info(" .apm/instructions/coding-standards.instructions.md")
_rich_info(" .apm/chatmodes/backend-engineer.chatmode.md")
else:
- _rich_error("❌ No APM content found to compile")
- _rich_info("💡 To get started:")
+ _rich_error("[x] No APM content found to compile")
+ _rich_info(" To get started:")
_rich_info(" 1. Install APM dependencies: apm install /")
_rich_info(
" 2. Or create local instructions: mkdir -p .apm/instructions"
@@ -386,7 +386,7 @@ def compile(
primitives = discover_primitives(".")
except Exception as e:
_rich_error(f"Failed to discover primitives: {e}")
- _rich_info(f"💡 Error details: {type(e).__name__}")
+ _rich_info(f" Error details: {type(e).__name__}")
sys.exit(1)
validation_errors = compiler.validate_primitives(primitives)
if validation_errors:
@@ -395,16 +395,16 @@ def compile(
sys.exit(1)
_rich_success("All primitives validated successfully!", symbol="sparkles")
_rich_info(f"Validated {primitives.count()} primitives:")
- _rich_info(f" • {len(primitives.chatmodes)} chatmodes")
- _rich_info(f" • {len(primitives.instructions)} instructions")
- _rich_info(f" • {len(primitives.contexts)} contexts")
+ _rich_info(f" * {len(primitives.chatmodes)} chatmodes")
+ _rich_info(f" * {len(primitives.instructions)} instructions")
+ _rich_info(f" * {len(primitives.contexts)} contexts")
# Show MCP dependency validation count
try:
from ..models.apm_package import APMPackage
apm_pkg = APMPackage.from_apm_yml(Path("apm.yml"))
mcp_count = len(apm_pkg.get_mcp_dependencies())
if mcp_count > 0:
- _rich_info(f" • {mcp_count} MCP dependencies")
+ _rich_info(f" * {mcp_count} MCP dependencies")
except Exception:
pass
return
@@ -460,7 +460,7 @@ def compile(
if detected_target == "minimal":
_rich_info(f"Compiling for AGENTS.md only ({detection_reason})")
_rich_info(
- "💡 Create .github/ or .claude/ folder for full integration",
+ " Create .github/ or .claude/ folder for full integration",
symbol="light_bulb",
)
elif detected_target == "vscode" or detected_target == "agents":
@@ -609,17 +609,17 @@ def compile(
table.add_row(
"Instructions",
str(stats.get("instructions", 0)),
- "✅ All validated",
+ "[+] All validated",
)
table.add_row(
"Contexts",
str(stats.get("contexts", 0)),
- "✅ All validated",
+ "[+] All validated",
)
table.add_row(
"Chatmodes",
str(stats.get("chatmodes", 0)),
- "✅ All validated",
+ "[+] All validated",
)
# Output row with file size
@@ -636,7 +636,7 @@ def compile(
except:
output_details = f"{output_path.name}"
- table.add_row("Output", "✨ SUCCESS", output_details)
+ table.add_row("Output", "* SUCCESS", output_details)
console.print(table)
else:
@@ -645,9 +645,9 @@ def compile(
f"Processed {stats.get('primitives_found', 0)} primitives:"
)
_rich_info(
- f" • {stats.get('instructions', 0)} instructions"
+ f" * {stats.get('instructions', 0)} instructions"
)
- _rich_info(f" • {stats.get('contexts', 0)} contexts")
+ _rich_info(f" * {stats.get('contexts', 0)} contexts")
_rich_info(
f"Constitution status: {c_status} hash={c_hash or '-'}"
)
@@ -656,8 +656,8 @@ def compile(
_rich_info(
f"Processed {stats.get('primitives_found', 0)} primitives:"
)
- _rich_info(f" • {stats.get('instructions', 0)} instructions")
- _rich_info(f" • {stats.get('contexts', 0)} contexts")
+ _rich_info(f" * {stats.get('instructions', 0)} instructions")
+ _rich_info(f" * {stats.get('contexts', 0)} contexts")
_rich_info(
f"Constitution status: {c_status} hash={c_hash or '-'}"
)
@@ -667,7 +667,7 @@ def compile(
"..." if len(final_content) > 500 else ""
)
_rich_panel(
- preview, title="📋 Generated Content Preview", style="cyan"
+ preview, title=" Generated Content Preview", style="cyan"
)
else:
next_steps = [
@@ -681,23 +681,23 @@ def compile(
from rich.panel import Panel
steps_content = "\n".join(
- f"• {step}" for step in next_steps
+ f"* {step}" for step in next_steps
)
console.print(
Panel(
steps_content,
- title="💡 Next Steps",
+ title=" Next Steps",
border_style="blue",
)
)
else:
_rich_info("Next steps:")
for step in next_steps:
- click.echo(f" • {step}")
+ click.echo(f" * {step}")
except (ImportError, NameError):
_rich_info("Next steps:")
for step in next_steps:
- click.echo(f" • {step}")
+ click.echo(f" * {step}")
# Common error handling for both compilation modes
# Note: Warnings are handled by professional formatters for distributed mode
@@ -708,12 +708,12 @@ def compile(
f"Compilation completed with {len(result.warnings)} warnings:"
)
for warning in result.warnings:
- click.echo(f" ⚠️ {warning}")
+ click.echo(f" [!] {warning}")
if result.errors:
_rich_error(f"Compilation failed with {len(result.errors)} errors:")
for error in result.errors:
- click.echo(f" ❌ {error}")
+ click.echo(f" [x] {error}")
sys.exit(1)
# Check for orphaned packages after successful compilation
@@ -722,11 +722,11 @@ def compile(
if orphaned_packages:
_rich_blank_line()
_rich_warning(
- f"⚠️ Found {len(orphaned_packages)} orphaned package(s) that were included in compilation:"
+ f"[!] Found {len(orphaned_packages)} orphaned package(s) that were included in compilation:"
)
for pkg in orphaned_packages:
- _rich_info(f" • {pkg}")
- _rich_info("💡 Run 'apm prune' to remove orphaned packages")
+ _rich_info(f" * {pkg}")
+ _rich_info(" Run 'apm prune' to remove orphaned packages")
except Exception:
pass # Continue if orphan check fails
diff --git a/src/apm_cli/commands/deps.py b/src/apm_cli/commands/deps.py
index 4a1d4627e..17f4379f0 100644
--- a/src/apm_cli/commands/deps.py
+++ b/src/apm_cli/commands/deps.py
@@ -44,17 +44,17 @@ def list_packages():
# Check if apm_modules exists
if not apm_modules_path.exists():
if has_rich:
- console.print("💡 No APM dependencies installed yet", style="cyan")
+ console.print(" No APM dependencies installed yet", style="cyan")
console.print("Run 'apm install' to install dependencies from apm.yml", style="dim")
else:
- click.echo("💡 No APM dependencies installed yet")
+ click.echo(" No APM dependencies installed yet")
click.echo("Run 'apm install' to install dependencies from apm.yml")
return
# Load project dependencies to check for orphaned packages
# GitHub: owner/repo or owner/virtual-pkg-name (2 levels)
# Azure DevOps: org/project/repo or org/project/virtual-pkg-name (3 levels)
- declared_sources = {} # dep_path → 'github' | 'azure-devops'
+ declared_sources = {} # dep_path -> 'github' | 'azure-devops'
try:
apm_yml_path = project_root / "apm.yml"
if apm_yml_path.exists():
@@ -127,7 +127,7 @@ def list_packages():
continue
org_repo_name = "/".join(rel_parts)
- # Skip sub-skills inside .apm/ directories — they belong to the parent package
+ # Skip sub-skills inside .apm/ directories -- they belong to the parent package
if '.apm' in rel_parts:
continue
@@ -157,18 +157,18 @@ def list_packages():
'is_orphaned': is_orphaned
})
except Exception as e:
- click.echo(f"⚠️ Warning: Failed to read package {org_repo_name}: {e}")
+ click.echo(f"[!] Warning: Failed to read package {org_repo_name}: {e}")
if not installed_packages:
if has_rich:
- console.print("💡 apm_modules/ directory exists but contains no valid packages", style="cyan")
+ console.print(" apm_modules/ directory exists but contains no valid packages", style="cyan")
else:
- click.echo("💡 apm_modules/ directory exists but contains no valid packages")
+ click.echo(" apm_modules/ directory exists but contains no valid packages")
return
# Display packages in table format
if has_rich:
- table = Table(title="📋 APM Dependencies", show_header=True, header_style="bold cyan")
+ table = Table(title=" APM Dependencies", show_header=True, header_style="bold cyan")
table.add_column("Package", style="bold white")
table.add_column("Version", style="yellow")
table.add_column("Source", style="blue")
@@ -195,13 +195,13 @@ def list_packages():
# Show orphaned packages warning
if orphaned_packages:
- console.print(f"\n⚠️ {len(orphaned_packages)} orphaned package(s) found (not in apm.yml):", style="yellow")
+ console.print(f"\n[!] {len(orphaned_packages)} orphaned package(s) found (not in apm.yml):", style="yellow")
for pkg in orphaned_packages:
- console.print(f" • {pkg}", style="dim yellow")
- console.print("\n💡 Run 'apm prune' to remove orphaned packages", style="cyan")
+ console.print(f" * {pkg}", style="dim yellow")
+ console.print("\n Run 'apm prune' to remove orphaned packages", style="cyan")
else:
# Fallback text table
- click.echo("📋 APM Dependencies:")
+ click.echo(" APM Dependencies:")
click.echo(f"{'Package':<30} {'Version':<10} {'Source':<12} {'Prompts':>7} {'Instr':>7} {'Agents':>7} {'Skills':>7} {'Hooks':>7}")
click.echo("-" * 98)
@@ -219,10 +219,10 @@ def list_packages():
# Show orphaned packages warning
if orphaned_packages:
- click.echo(f"\n⚠️ {len(orphaned_packages)} orphaned package(s) found (not in apm.yml):")
+ click.echo(f"\n[!] {len(orphaned_packages)} orphaned package(s) found (not in apm.yml):")
for pkg in orphaned_packages:
- click.echo(f" • {pkg}")
- click.echo("\n💡 Run 'apm prune' to remove orphaned packages")
+ click.echo(f" * {pkg}")
+ click.echo("\n Run 'apm prune' to remove orphaned packages")
except Exception as e:
_rich_error(f"Error listing dependencies: {e}")
@@ -274,7 +274,7 @@ def tree():
direct = [d for d in lockfile_deps if d.depth <= 1]
transitive = [d for d in lockfile_deps if d.depth > 1]
- # Build parent→children map
+ # Build parent->children map
children_map: Dict[str, list] = {}
for dep in transitive:
parent_key = dep.resolved_by or ""
@@ -330,20 +330,20 @@ def _add_children(parent_branch, parent_repo_url, depth=0):
click.echo(f"{project_name} (local)")
if not direct:
- click.echo("└── No dependencies installed")
+ click.echo("+-- No dependencies installed")
else:
for i, dep in enumerate(direct):
is_last = i == len(direct) - 1
- prefix = "└── " if is_last else "├── "
+ prefix = "+-- " if is_last else "|-- "
display = _dep_display_name(dep)
click.echo(f"{prefix}{display}")
# Show transitive deps
kids = children_map.get(dep.repo_url, [])
- sub_prefix = " " if is_last else "│ "
+ sub_prefix = " " if is_last else "| "
for j, child in enumerate(kids):
child_is_last = j == len(kids) - 1
- child_prefix = "└── " if child_is_last else "├── "
+ child_prefix = "+-- " if child_is_last else "|-- "
click.echo(f"{sub_prefix}{child_prefix}{_dep_display_name(child)}")
else:
# Fallback: scan apm_modules directory (no lockfile)
@@ -382,7 +382,7 @@ def _add_children(parent_branch, parent_repo_url, depth=0):
else:
click.echo(f"{project_name} (local)")
if not apm_modules_path.exists():
- click.echo("└── No dependencies installed")
+ click.echo("+-- No dependencies installed")
except Exception as e:
_rich_error(f"Error showing dependency tree: {e}")
@@ -527,30 +527,30 @@ def info(package: str):
for context_type, count in package_info['context_files'].items():
if count > 0:
- content_lines.append(f" • {count} {context_type}")
+ content_lines.append(f" * {count} {context_type}")
if not any(count > 0 for count in package_info['context_files'].values()):
- content_lines.append(" • No context files found")
+ content_lines.append(" * No context files found")
content_lines.append("")
content_lines.append("[bold]Agent Workflows:[/bold]")
if package_info['workflows'] > 0:
- content_lines.append(f" • {package_info['workflows']} executable workflows")
+ content_lines.append(f" * {package_info['workflows']} executable workflows")
else:
- content_lines.append(" • No agent workflows found")
+ content_lines.append(" * No agent workflows found")
if package_info.get('hooks', 0) > 0:
content_lines.append("")
content_lines.append("[bold]Hooks:[/bold]")
- content_lines.append(f" • {package_info['hooks']} hook file(s)")
+ content_lines.append(f" * {package_info['hooks']} hook file(s)")
content = "\n".join(content_lines)
- panel = Panel(content, title=f"ℹ️ Package Info: {package}", border_style="cyan")
+ panel = Panel(content, title=f"[i] Package Info: {package}", border_style="cyan")
console.print(panel)
except ImportError:
# Fallback text display
- click.echo(f"ℹ️ Package Info: {package}")
+ click.echo(f"[i] Package Info: {package}")
click.echo("=" * 40)
click.echo(f"Name: {package_info['name']}")
click.echo(f"Version: {package_info['version']}")
@@ -563,22 +563,22 @@ def info(package: str):
for context_type, count in package_info['context_files'].items():
if count > 0:
- click.echo(f" • {count} {context_type}")
+ click.echo(f" * {count} {context_type}")
if not any(count > 0 for count in package_info['context_files'].values()):
- click.echo(" • No context files found")
+ click.echo(" * No context files found")
click.echo("")
click.echo("Agent Workflows:")
if package_info['workflows'] > 0:
- click.echo(f" • {package_info['workflows']} executable workflows")
+ click.echo(f" * {package_info['workflows']} executable workflows")
else:
- click.echo(" • No agent workflows found")
+ click.echo(" * No agent workflows found")
if package_info.get('hooks', 0) > 0:
click.echo("")
click.echo("Hooks:")
- click.echo(f" • {package_info['hooks']} hook file(s)")
+ click.echo(f" * {package_info['hooks']} hook file(s)")
except Exception as e:
_rich_error(f"Error reading package information: {e}")
@@ -595,7 +595,7 @@ def _is_nested_under_package(candidate: Path, apm_modules_path: Path) -> bool:
the ``rglob`` scan would otherwise treat each skill sub-directory as an
independent package. This helper walks up from *candidate* towards
*apm_modules_path* and returns ``True`` if any intermediate parent already
- contains ``apm.yml`` — meaning the candidate is a deployment artifact, not
+ contains ``apm.yml`` -- meaning the candidate is a deployment artifact, not
a standalone package.
"""
parent = candidate.parent
@@ -826,7 +826,7 @@ def _update_single_package(package_name: str, project_deps: List, apm_modules_pa
# Download latest version
package_info = downloader.download_package(str(target_dep), package_dir)
- _rich_success(f"✅ Updated {target_dep.repo_url}")
+ _rich_success(f"[+] Updated {target_dep.repo_url}")
except Exception as e:
_rich_error(f"Failed to update {package_name}: {e}")
@@ -862,17 +862,17 @@ def _update_all_packages(project_deps: List, apm_modules_path: Path):
package_dir = apm_modules_path / dep.repo_url
if not package_dir.exists():
- _rich_warning(f"⚠️ {dep.repo_url} not installed - skipping")
+ _rich_warning(f"[!] {dep.repo_url} not installed - skipping")
continue
try:
_rich_info(f" Updating {dep.repo_url}...")
package_info = downloader.download_package(str(dep), package_dir)
updated_count += 1
- _rich_success(f" ✅ {dep.repo_url}")
+ _rich_success(f" [+] {dep.repo_url}")
except Exception as e:
- _rich_error(f" ❌ Failed to update {dep.repo_url}: {e}")
+ _rich_error(f" [x] Failed to update {dep.repo_url}: {e}")
continue
_rich_success(f"Updated {updated_count} of {len(project_deps)} packages")
diff --git a/src/apm_cli/commands/init.py b/src/apm_cli/commands/init.py
index 253c98310..b0f2351bb 100644
--- a/src/apm_cli/commands/init.py
+++ b/src/apm_cli/commands/init.py
@@ -95,13 +95,13 @@ def init(ctx, project_name, yes):
console = _get_console()
if console:
files_data = [
- ("✨", "apm.yml", "Project configuration"),
+ ("*", "apm.yml", "Project configuration"),
]
table = _create_files_table(files_data, title="Created Files")
console.print(table)
except (ImportError, NameError):
_rich_info("Created:")
- _rich_echo(" ✨ apm.yml - Project configuration", style="muted")
+ _rich_echo(" * apm.yml - Project configuration", style="muted")
_rich_blank_line()
@@ -115,14 +115,14 @@ def init(ctx, project_name, yes):
try:
_rich_panel(
- "\n".join(f"• {step}" for step in next_steps),
- title="💡 Next Steps",
+ "\n".join(f"* {step}" for step in next_steps),
+ title=" Next Steps",
style="cyan",
)
except (ImportError, NameError):
_rich_info("Next steps:")
for step in next_steps:
- click.echo(f" • {step}")
+ click.echo(f" * {step}")
except Exception as e:
_rich_error(f"Error initializing project: {e}")
diff --git a/src/apm_cli/commands/list_cmd.py b/src/apm_cli/commands/list_cmd.py
index e07208a3a..55465ad86 100644
--- a/src/apm_cli/commands/list_cmd.py
+++ b/src/apm_cli/commands/list_cmd.py
@@ -41,7 +41,7 @@ def list(ctx):
style="blue",
)
except (ImportError, NameError):
- _rich_info("💡 Add scripts to your apm.yml file:")
+ _rich_info(" Add scripts to your apm.yml file:")
click.echo("scripts:")
click.echo(' start: "codex run main.prompt.md"')
click.echo(' fast: "llm prompt main.prompt.md -m github/gpt-4o-mini"')
@@ -57,7 +57,7 @@ def list(ctx):
# Create a nice table for scripts
table = Table(
- title="📋 Available Scripts",
+ title=" Available Scripts",
show_header=True,
header_style="bold cyan",
)
diff --git a/src/apm_cli/commands/mcp.py b/src/apm_cli/commands/mcp.py
index aeee90c09..26a18f00a 100644
--- a/src/apm_cli/commands/mcp.py
+++ b/src/apm_cli/commands/mcp.py
@@ -48,17 +48,17 @@ def search(ctx, query, limit):
if not servers:
console.print(
- f"\n[yellow]⚠[/yellow] No MCP servers found matching '[bold]{query}[/bold]'"
+ f"\n[yellow][!][/yellow] No MCP servers found matching '[bold]{query}[/bold]'"
)
console.print(
- "\n[muted]💡 Try broader search terms or check the spelling[/muted]"
+ "\n[muted] Try broader search terms or check the spelling[/muted]"
)
return
# Results summary
total_shown = len(servers)
console.print(
- f"\n[green]✓[/green] Found [bold]{total_shown}[/bold] MCP server{'s' if total_shown != 1 else ''}"
+ f"\n[green]+[/green] Found [bold]{total_shown}[/bold] MCP server{'s' if total_shown != 1 else ''}"
)
# Professional results table
@@ -72,7 +72,7 @@ def search(ctx, query, limit):
for server in servers:
name = server.get("name", "Unknown")
desc = server.get("description", "No description available")
- version = server.get("version", "—")
+ version = server.get("version", "--")
# Intelligent description truncation
if len(desc) > 80:
@@ -90,7 +90,7 @@ def search(ctx, query, limit):
# Helpful next steps
console.print(
- f"\n[muted]💡 Use [bold cyan]apm mcp show [/bold cyan] for detailed information[/muted]"
+ f"\n[muted] Use [bold cyan]apm mcp show [/bold cyan] for detailed information[/muted]"
)
if total_shown == limit:
console.print(
@@ -138,10 +138,10 @@ def show(ctx, server_name):
server_info = registry.get_package_info(server_name)
except ValueError:
console.print(
- f"\n[red]✗[/red] MCP server '[bold]{server_name}[/bold]' not found in registry"
+ f"\n[red]x[/red] MCP server '[bold]{server_name}[/bold]' not found in registry"
)
console.print(
- f"\n[muted]💡 Use [bold cyan]apm mcp search [/bold cyan] to find available servers[/muted]"
+ f"\n[muted] Use [bold cyan]apm mcp search [/bold cyan] to find available servers[/muted]"
)
sys.exit(1)
@@ -165,7 +165,7 @@ def show(ctx, server_name):
# Main server information table
info_table = Table(
- title=f"📦 MCP Server: {name}",
+ title=f" MCP Server: {name}",
show_header=True,
header_style="bold cyan",
border_style="cyan",
@@ -189,9 +189,9 @@ def show(ctx, server_name):
for remote in remotes:
transport_type = remote.get("transport_type", "unknown")
if transport_type == "sse":
- deployment_info.append("🌐 Remote SSE Endpoint")
+ deployment_info.append(" Remote SSE Endpoint")
if packages:
- deployment_info.append("📦 Local Package")
+ deployment_info.append(" Local Package")
if deployment_info:
info_table.add_row("Deployment Type", " + ".join(deployment_info))
@@ -201,7 +201,7 @@ def show(ctx, server_name):
# Show remote endpoints if available
if remotes:
remote_table = Table(
- title="🌐 Remote Endpoints",
+ title=" Remote Endpoints",
show_header=True,
header_style="bold cyan",
border_style="cyan",
@@ -226,7 +226,7 @@ def show(ctx, server_name):
# Installation packages in consistent table format
if packages:
pkg_table = Table(
- title="📦 Local Packages",
+ title=" Local Packages",
show_header=True,
header_style="bold cyan",
border_style="cyan",
@@ -239,7 +239,7 @@ def show(ctx, server_name):
for pkg in packages:
registry_name = pkg.get("registry_name", "unknown")
pkg_name = pkg.get("name", "unknown")
- runtime_hint = pkg.get("runtime_hint", "—")
+ runtime_hint = pkg.get("runtime_hint", "--")
# Describe features of local packages
features = "Full configuration control"
@@ -257,7 +257,7 @@ def show(ctx, server_name):
# Installation instructions in structured table format
install_name = server_info.get("name", server_name)
install_table = Table(
- title="✨ Installation Guide",
+ title="* Installation Guide",
show_header=True,
header_style="bold cyan",
border_style="green",
@@ -317,16 +317,16 @@ def list(ctx, limit):
servers = registry.list_available_packages()[:limit]
if not servers:
- console.print(f"\n[yellow]⚠[/yellow] No MCP servers found in registry")
+ console.print(f"\n[yellow][!][/yellow] No MCP servers found in registry")
console.print(
- f"\n[muted]💡 The registry might be temporarily unavailable[/muted]"
+ f"\n[muted] The registry might be temporarily unavailable[/muted]"
)
return
# Results summary with pagination info
total_shown = len(servers)
console.print(
- f"\n[green]✓[/green] Showing [bold]{total_shown}[/bold] MCP servers"
+ f"\n[green]+[/green] Showing [bold]{total_shown}[/bold] MCP servers"
)
if total_shown == limit:
console.print(
@@ -344,7 +344,7 @@ def list(ctx, limit):
for server in servers:
name = server.get("name", "Unknown")
desc = server.get("description", "No description available")
- version = server.get("version", "—")
+ version = server.get("version", "--")
# Intelligent description truncation
if len(desc) > 80:
@@ -362,7 +362,7 @@ def list(ctx, limit):
# Helpful navigation
console.print(
- f"\n[muted]💡 Use [bold cyan]apm mcp show [/bold cyan] for detailed information[/muted]"
+ f"\n[muted] Use [bold cyan]apm mcp show [/bold cyan] for detailed information[/muted]"
)
console.print(
f"[muted] Use [bold cyan]apm mcp search [/bold cyan] to find specific servers[/muted]"
diff --git a/src/apm_cli/commands/pack.py b/src/apm_cli/commands/pack.py
index 72fae82eb..7b3833d40 100644
--- a/src/apm_cli/commands/pack.py
+++ b/src/apm_cli/commands/pack.py
@@ -48,7 +48,7 @@ def pack_cmd(ctx, fmt, target, archive, output, dry_run):
)
if dry_run:
- _rich_info("Dry run — no files written")
+ _rich_info("Dry run -- no files written")
if result.files:
_rich_info(f"Would pack {len(result.files)} file(s):")
for f in result.files:
@@ -58,9 +58,9 @@ def pack_cmd(ctx, fmt, target, archive, output, dry_run):
return
if not result.files:
- _rich_warning("No deployed files found — empty bundle created")
+ _rich_warning("No deployed files found -- empty bundle created")
else:
- _rich_success(f"Packed {len(result.files)} file(s) → {result.bundle_path}")
+ _rich_success(f"Packed {len(result.files)} file(s) -> {result.bundle_path}")
except (FileNotFoundError, ValueError) as exc:
_rich_error(str(exc))
@@ -92,7 +92,7 @@ def unpack_cmd(ctx, bundle_path, output, skip_verify, dry_run):
)
if dry_run:
- _rich_info("Dry run — no files written")
+ _rich_info("Dry run -- no files written")
if result.files:
_rich_info(f"Would unpack {len(result.files)} file(s):")
_log_unpack_file_list(result)
diff --git a/src/apm_cli/commands/prune.py b/src/apm_cli/commands/prune.py
index 4ee925073..d07f209cc 100644
--- a/src/apm_cli/commands/prune.py
+++ b/src/apm_cli/commands/prune.py
@@ -81,14 +81,14 @@ def prune(ctx, dry_run):
pkg_path = apm_modules_dir.joinpath(*path_parts)
try:
shutil.rmtree(pkg_path)
- _rich_info(f"✓ Removed {org_repo_name}")
+ _rich_info(f"+ Removed {org_repo_name}")
removed_count += 1
pruned_keys.append(org_repo_name)
deleted_pkg_paths.append(pkg_path)
except Exception as e:
- _rich_error(f"✗ Failed to remove {org_repo_name}: {e}")
+ _rich_error(f"x Failed to remove {org_repo_name}: {e}")
- # Batch parent cleanup — single bottom-up pass
+ # Batch parent cleanup -- single bottom-up pass
from ..integration.base_integrator import BaseIntegrator
BaseIntegrator.cleanup_empty_parents(deleted_pkg_paths, stop_at=apm_modules_dir)
@@ -119,10 +119,10 @@ def prune(ctx, dry_run):
# Remove from lockfile
if dep_key in lockfile.dependencies:
del lockfile.dependencies[dep_key]
- # Batch parent cleanup — single bottom-up pass
+ # Batch parent cleanup -- single bottom-up pass
BaseIntegrator.cleanup_empty_parents(deleted_targets, stop_at=project_root)
if deployed_cleaned > 0:
- _rich_info(f"✓ Cleaned {deployed_cleaned} deployed integration file(s)")
+ _rich_info(f"+ Cleaned {deployed_cleaned} deployed integration file(s)")
# Write updated lockfile (or remove if empty)
try:
if lockfile.dependencies:
diff --git a/src/apm_cli/commands/run.py b/src/apm_cli/commands/run.py
index 173f3b18d..efb87a258 100644
--- a/src/apm_cli/commands/run.py
+++ b/src/apm_cli/commands/run.py
@@ -133,7 +133,7 @@ def preview(ctx, script_name, param):
try:
# Show original and compiled commands in panels
- _rich_panel(command, title="📄 Original command", style="blue")
+ _rich_panel(command, title=" Original command", style="blue")
# Auto-compile prompts to show what would be executed
compiled_command, compiled_prompt_files = (
@@ -142,12 +142,12 @@ def preview(ctx, script_name, param):
if compiled_prompt_files:
_rich_panel(
- compiled_command, title="⚡ Compiled command", style="green"
+ compiled_command, title="> Compiled command", style="green"
)
else:
_rich_panel(
compiled_command,
- title="⚡ Command (no prompt compilation)",
+ title="> Command (no prompt compilation)",
style="yellow",
)
_rich_warning(
@@ -164,16 +164,16 @@ def preview(ctx, script_name, param):
compiled_path = Path(".apm/compiled") / output_name
file_list.append(str(compiled_path))
- files_content = "\n".join([f"📄 {file}" for file in file_list])
+ files_content = "\n".join([f" {file}" for file in file_list])
_rich_panel(
- files_content, title="📁 Compiled prompt files", style="cyan"
+ files_content, title=" Compiled prompt files", style="cyan"
)
else:
_rich_panel(
"No .prompt.md files were compiled.\n\n"
+ "APM only compiles files ending with '.prompt.md' extension.\n"
+ "Other files are executed as-is by the runtime.",
- title="ℹ️ Compilation Info",
+ title="[i] Compilation Info",
style="cyan",
)
diff --git a/src/apm_cli/commands/runtime.py b/src/apm_cli/commands/runtime.py
index 75f7bede7..22bbadc8b 100644
--- a/src/apm_cli/commands/runtime.py
+++ b/src/apm_cli/commands/runtime.py
@@ -67,7 +67,7 @@ def list():
console = _get_console()
# Create a nice table for runtimes
table = Table(
- title="🤖 Available Runtimes",
+ title=" Available Runtimes",
show_header=True,
header_style="bold cyan",
)
@@ -103,7 +103,7 @@ def list():
click.echo()
for name, info in runtimes.items():
- status_icon = "✅" if info["installed"] else "❌"
+ status_icon = "[+]" if info["installed"] else "[x]"
status_text = "Installed" if info["installed"] else "Not installed"
click.echo(f"{status_icon} {HIGHLIGHT}{name}{RESET}")
@@ -159,21 +159,21 @@ def status():
try:
# Create a nice status display
- status_content = f"""Preference order: {' → '.join(preference)}
+ status_content = f"""Preference order: {' -> '.join(preference)}
Active runtime: {available_runtime if available_runtime else 'None available'}"""
if not available_runtime:
status_content += f"\n\n{STATUS_SYMBOLS['info']} Run 'apm runtime setup copilot' to install the primary runtime"
- _rich_panel(status_content, title="📊 Runtime Status", style="cyan")
+ _rich_panel(status_content, title=" Runtime Status", style="cyan")
except (ImportError, NameError):
# Fallback display
_rich_info("Runtime Status:")
click.echo()
- click.echo(f"Preference order: {' → '.join(preference)}")
+ click.echo(f"Preference order: {' -> '.join(preference)}")
if available_runtime:
_rich_success(f"Active runtime: {available_runtime}")
diff --git a/src/apm_cli/commands/uninstall.py b/src/apm_cli/commands/uninstall.py
index b8c252034..99a523502 100644
--- a/src/apm_cli/commands/uninstall.py
+++ b/src/apm_cli/commands/uninstall.py
@@ -113,10 +113,10 @@ def _parse_dependency_entry(dep_entry):
if matched_dep is not None:
packages_to_remove.append(matched_dep)
- _rich_info(f"✓ {package} - found in apm.yml")
+ _rich_info(f"+ {package} - found in apm.yml")
else:
packages_not_found.append(package)
- _rich_warning(f"✗ {package} - not found in apm.yml")
+ _rich_warning(f"x {package} - not found in apm.yml")
if not packages_to_remove:
_rich_warning("No packages found in apm.yml to remove")
@@ -222,17 +222,17 @@ def _parse_dependency_entry(dep_entry):
if package_path.exists():
try:
shutil.rmtree(package_path)
- _rich_info(f"✓ Removed {package} from apm_modules/")
+ _rich_info(f"+ Removed {package} from apm_modules/")
removed_from_modules += 1
deleted_pkg_paths.append(package_path)
except Exception as e:
_rich_error(
- f"✗ Failed to remove {package} from apm_modules/: {e}"
+ f"x Failed to remove {package} from apm_modules/: {e}"
)
else:
_rich_warning(f"Package {package} not found in apm_modules/")
- # Batch parent cleanup — single bottom-up pass
+ # Batch parent cleanup -- single bottom-up pass
from ..integration.base_integrator import BaseIntegrator as _BI2
_BI2.cleanup_empty_parents(deleted_pkg_paths, stop_at=apm_modules_dir)
@@ -307,13 +307,13 @@ def _find_transitive_orphans(lockfile, removed_urls):
if orphan_path.exists():
try:
shutil.rmtree(orphan_path)
- _rich_info(f"✓ Removed transitive dependency {orphan_key} from apm_modules/")
+ _rich_info(f"+ Removed transitive dependency {orphan_key} from apm_modules/")
removed_from_modules += 1
deleted_orphan_paths.append(orphan_path)
except Exception as e:
- _rich_error(f"✗ Failed to remove transitive dep {orphan_key}: {e}")
+ _rich_error(f"x Failed to remove transitive dep {orphan_key}: {e}")
- # Batch parent cleanup — single bottom-up pass
+ # Batch parent cleanup -- single bottom-up pass
from ..integration.base_integrator import BaseIntegrator as _BI
_BI.cleanup_empty_parents(deleted_orphan_paths, stop_at=apm_modules_dir)
@@ -362,7 +362,7 @@ def _find_transitive_orphans(lockfile, removed_urls):
if lockfile.dependencies:
lockfile.write(lockfile_path)
else:
- # No deps left — remove lockfile
+ # No deps left -- remove lockfile
lockfile_path.unlink(missing_ok=True)
except Exception:
pass
@@ -390,8 +390,8 @@ def _find_transitive_orphans(lockfile, removed_urls):
# Use pre-collected deployed_files (captured before lockfile entries were deleted)
sync_managed = all_deployed_files if all_deployed_files else None
- # Pre-partition managed files by integration type — single O(M)
- # pass instead of 6× O(M) prefix scans inside each integrator.
+ # Pre-partition managed files by integration type -- single O(M)
+ # pass instead of 6x O(M) prefix scans inside each integrator.
if sync_managed is not None:
_buckets = BaseIntegrator.partition_managed_files(sync_managed)
else:
@@ -500,21 +500,21 @@ def _find_transitive_orphans(lockfile, removed_urls):
pass # Best effort re-integration
except Exception:
- pass # Best effort cleanup — don't report false failures
+ pass # Best effort cleanup -- don't report false failures
# Show cleanup feedback
if prompts_cleaned > 0:
- _rich_info(f"✓ Cleaned up {prompts_cleaned} integrated prompt(s)")
+ _rich_info(f"+ Cleaned up {prompts_cleaned} integrated prompt(s)")
if agents_cleaned > 0:
- _rich_info(f"✓ Cleaned up {agents_cleaned} integrated agent(s)")
+ _rich_info(f"+ Cleaned up {agents_cleaned} integrated agent(s)")
if skills_cleaned > 0:
- _rich_info(f"✓ Cleaned up {skills_cleaned} skill(s)")
+ _rich_info(f"+ Cleaned up {skills_cleaned} skill(s)")
if commands_cleaned > 0:
- _rich_info(f"✓ Cleaned up {commands_cleaned} command(s)")
+ _rich_info(f"+ Cleaned up {commands_cleaned} command(s)")
if hooks_cleaned > 0:
- _rich_info(f"✓ Cleaned up {hooks_cleaned} hook(s)")
+ _rich_info(f"+ Cleaned up {hooks_cleaned} hook(s)")
if instructions_cleaned > 0:
- _rich_info(f"✓ Cleaned up {instructions_cleaned} instruction(s)")
+ _rich_info(f"+ Cleaned up {instructions_cleaned} instruction(s)")
# Clean up stale MCP servers after uninstall
try:
diff --git a/src/apm_cli/commands/update.py b/src/apm_cli/commands/update.py
index dd571c9a5..9cd290525 100644
--- a/src/apm_cli/commands/update.py
+++ b/src/apm_cli/commands/update.py
@@ -1,6 +1,7 @@
"""APM update command."""
import os
+import shutil
import sys
import click
@@ -9,6 +10,44 @@
from ..version import get_version
+def _is_windows_platform() -> bool:
+ """Return True when running on native Windows."""
+ return sys.platform == "win32"
+
+
+def _get_update_installer_url() -> str:
+ """Return the official installer URL for the current platform."""
+ installer_name = "install.ps1" if _is_windows_platform() else "install.sh"
+ return f"https://raw.githubusercontent.com/microsoft/apm/main/{installer_name}"
+
+
+def _get_update_installer_suffix() -> str:
+ """Return the file suffix for the downloaded installer script."""
+ return ".ps1" if _is_windows_platform() else ".sh"
+
+
+def _get_manual_update_command() -> str:
+ """Return the manual update command for the current platform."""
+ if _is_windows_platform():
+ return (
+ 'powershell -ExecutionPolicy Bypass -c '
+ '"irm https://raw.githubusercontent.com/microsoft/apm/main/install.ps1 | iex"'
+ )
+ return "curl -sSL https://raw.githubusercontent.com/microsoft/apm/main/install.sh | sh"
+
+
+def _get_installer_run_command(script_path: str) -> list[str]:
+ """Return the installer execution command for the current platform."""
+ if _is_windows_platform():
+ powershell_path = shutil.which("powershell") or shutil.which("pwsh")
+ if not powershell_path:
+ raise FileNotFoundError("PowerShell executable not found in PATH")
+ return [powershell_path, "-ExecutionPolicy", "Bypass", "-File", script_path]
+
+ shell_path = "/bin/sh" if os.path.exists("/bin/sh") else "sh"
+ return [shell_path, script_path]
+
+
@click.command(help="Update APM to the latest version")
@click.option("--check", is_flag=True, help="Only check for updates without installing")
def update(check):
@@ -62,7 +101,7 @@ def update(check):
_rich_info(f"Latest version available: {latest_version}", symbol="sparkles")
if check:
- _rich_warning(f"Update available: {current_version} → {latest_version}")
+ _rich_warning(f"Update available: {current_version} -> {latest_version}")
_rich_info("Run 'apm update' (without --check) to install", symbol="info")
return
@@ -73,29 +112,25 @@ def update(check):
try:
import requests
- install_script_url = (
- "https://raw.githubusercontent.com/microsoft/apm/main/install.sh"
- )
+ install_script_url = _get_update_installer_url()
response = requests.get(install_script_url, timeout=10)
response.raise_for_status()
# Create temporary file for install script
- with tempfile.NamedTemporaryFile(mode="w", suffix=".sh", delete=False) as f:
+ with tempfile.NamedTemporaryFile(
+ mode="w", suffix=_get_update_installer_suffix(), delete=False
+ ) as f:
temp_script = f.name
f.write(response.text)
- # Make script executable
- os.chmod(temp_script, 0o755)
+ if not _is_windows_platform():
+ os.chmod(temp_script, 0o755)
# Run install script
_rich_info("Running installer...", symbol="gear")
- # Use /bin/sh for better cross-platform compatibility
- # Note: We don't capture output so the installer can prompt for sudo
- shell_path = "/bin/sh" if os.path.exists("/bin/sh") else "sh"
- result = subprocess.run(
- [shell_path, temp_script], check=False
- )
+ # Note: We don't capture output so the installer can prompt when needed.
+ result = subprocess.run(_get_installer_run_command(temp_script), check=False)
# Clean up temp file
try:
@@ -119,16 +154,12 @@ def update(check):
except ImportError:
_rich_error("'requests' library not available")
_rich_info("Please update manually using:")
- click.echo(
- " curl -sSL https://raw.githubusercontent.com/microsoft/apm/main/install.sh | sh"
- )
+ click.echo(f" {_get_manual_update_command()}")
sys.exit(1)
except Exception as e:
_rich_error(f"Update failed: {e}")
_rich_info("Please update manually using:")
- click.echo(
- " curl -sSL https://raw.githubusercontent.com/microsoft/apm/main/install.sh | sh"
- )
+ click.echo(f" {_get_manual_update_command()}")
sys.exit(1)
except Exception as e:
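Taken together, the helpers added above give update a single platform-neutral install path. A condensed sketch of the flow this hunk implements (the _get_* and _is_windows_platform names are the functions defined above; error handling trimmed):

    import os
    import subprocess
    import tempfile

    def _run_downloaded_installer(script_body: str) -> int:
        suffix = _get_update_installer_suffix()  # ".ps1" on Windows, ".sh" elsewhere
        with tempfile.NamedTemporaryFile(mode="w", suffix=suffix, delete=False) as f:
            f.write(script_body)
            temp_script = f.name
        if not _is_windows_platform():
            os.chmod(temp_script, 0o755)  # PowerShell scripts need no exec bit
        try:
            # ["powershell", "-ExecutionPolicy", "Bypass", "-File", path] on Windows,
            # ["/bin/sh", path] on Unix
            return subprocess.run(_get_installer_run_command(temp_script), check=False).returncode
        finally:
            os.unlink(temp_script)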
diff --git a/src/apm_cli/compilation/agents_compiler.py b/src/apm_cli/compilation/agents_compiler.py
index 8e2cc3aca..97cb3bb9d 100644
--- a/src/apm_cli/compilation/agents_compiler.py
+++ b/src/apm_cli/compilation/agents_compiler.py
@@ -435,7 +435,7 @@ def _compile_claude_md(self, config: CompilationConfig, primitives: PrimitiveCol
rel_path = claude_path.relative_to(self.base_dir)
except ValueError:
rel_path = claude_path
- preview_lines.append(f" 📄 {rel_path}")
+ preview_lines.append(f" {rel_path}")
return CompilationResult(
success=len(all_errors) == 0,
@@ -756,7 +756,7 @@ def _display_placement_preview(self, distributed_result) -> None:
Args:
distributed_result: Result from distributed compilation.
"""
- print("🔍 Distributed AGENTS.md Placement Preview:")
+ print("Distributed AGENTS.md Placement Preview:")
print()
for placement in distributed_result.placements:
@@ -765,7 +765,7 @@ def _display_placement_preview(self, distributed_result) -> None:
except ValueError:
# Fallback for path resolution issues
rel_path = placement.agents_path
- print(f"📄 {rel_path}")
+ print(f"{rel_path}")
print(f" Instructions: {len(placement.instructions)}")
print(f" Patterns: {', '.join(sorted(placement.coverage_patterns))}")
if placement.source_attribution:
@@ -780,7 +780,7 @@ def _display_trace_info(self, distributed_result, primitives: PrimitiveCollectio
distributed_result: Result from distributed compilation.
primitives (PrimitiveCollection): Full primitive collection.
"""
- print("🔍 Distributed Compilation Trace:")
+ print("Distributed Compilation Trace:")
print()
for placement in distributed_result.placements:
@@ -788,7 +788,7 @@ def _display_trace_info(self, distributed_result, primitives: PrimitiveCollectio
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
except ValueError:
rel_path = placement.agents_path
- print(f"📄 {rel_path}")
+ print(f"{rel_path}")
for instruction in placement.instructions:
source = getattr(instruction, 'source', 'local')
@@ -797,7 +797,7 @@ def _display_trace_info(self, distributed_result, primitives: PrimitiveCollectio
except ValueError:
inst_path = instruction.file_path
- print(f" • {instruction.apply_to or 'no pattern'} <- {source} {inst_path}")
+ print(f" * {instruction.apply_to or 'no pattern'} <- {source} {inst_path}")
print()
def _generate_placement_summary(self, distributed_result) -> str:
@@ -816,7 +816,7 @@ def _generate_placement_summary(self, distributed_result) -> str:
rel_path = placement.agents_path.relative_to(self.base_dir.resolve())
except ValueError:
rel_path = placement.agents_path
- lines.append(f"📄 {rel_path}")
+ lines.append(f"{rel_path}")
lines.append(f" Instructions: {len(placement.instructions)}")
lines.append(f" Patterns: {', '.join(sorted(placement.coverage_patterns))}")
lines.append("")
diff --git a/src/apm_cli/compilation/context_optimizer.py b/src/apm_cli/compilation/context_optimizer.py
index 33601acec..2e8276340 100644
--- a/src/apm_cli/compilation/context_optimizer.py
+++ b/src/apm_cli/compilation/context_optimizer.py
@@ -151,7 +151,7 @@ def _time_phase(self, phase_name: str, operation_func, *args, **kwargs):
# Only show timing in verbose mode with professional formatting
if self._timing_enabled and hasattr(self, '_verbose') and self._verbose:
- print(f"⏱️ {phase_name}: {duration*1000:.1f}ms")
+ print(f" {phase_name}: {duration*1000:.1f}ms")
return result
def _cached_glob(self, pattern: str) -> List[str]:
@@ -207,7 +207,7 @@ def optimize_instruction_placement(
self._errors.clear()
# Phase 1: Analyze project structure
- self._time_phase("📊 Project Analysis", self._analyze_project_structure)
+ self._time_phase("Project Analysis", self._analyze_project_structure)
# Phase 2: Analyze each instruction for optimal placement
placement_map: Dict[Path, List[Instruction]] = defaultdict(list)
@@ -241,7 +241,7 @@ def process_instructions():
for directory in optimal_placements:
placement_map[directory].append(instruction)
- self._time_phase("🎯 Instruction Processing", process_instructions)
+ self._time_phase("Instruction Processing", process_instructions)
return dict(placement_map)
@@ -430,7 +430,7 @@ def _analyze_project_structure(self) -> None:
if any(part.startswith('.') for part in current_path.parts[len(self.base_dir.parts):]):
continue
- # Default hardcoded exclusions — match on exact path components
+ # Default hardcoded exclusions -- match on exact path components
if any(part in DEFAULT_EXCLUDED_DIRNAMES for part in relative_path.parts):
continue
@@ -505,8 +505,14 @@ def _should_exclude_path(self, path: Path) -> bool:
return False
# Get path relative to base_dir for pattern matching
+ # Resolve the path first to handle cross-platform differences
+ # (e.g., on Windows, Path('/test') only matches Path('C:/test') once resolved)
try:
- rel_path = path.relative_to(self.base_dir)
+ resolved = path.resolve()
+ except OSError:  # FileNotFoundError is a subclass of OSError
+ resolved = path.absolute()
+ try:
+ rel_path = resolved.relative_to(self.base_dir)
except ValueError:
# Path is not relative to base_dir, don't exclude
return False
@@ -627,8 +633,8 @@ def _solve_placement_optimization(
"""Mathematical optimization solver for instruction placement.
Implements the mathematician's objective function:
- minimize: Σ(context_pollution × directory_weight)
- subject to: ∀instruction → ∃placement
+ minimize: sum(context_pollution * directory_weight)
+ subject to: every instruction -> at least one placement
Args:
instruction (Instruction): Instruction to optimize placement for.
@@ -867,7 +873,7 @@ def _calculate_inheritance_pollution(self, directory: Path, pattern: str) -> flo
pollution_score = 0.0
# Optimization: Only check direct children instead of all directories
- # This prevents O(n²) complexity with unlimited depth analysis
+ # This prevents O(n^2) complexity with unlimited depth analysis
try:
direct_children = [
child for child in directory.iterdir()
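The _should_exclude_path change above exists because relative_to() compares paths textually: on Windows, a drive-less absolute path such as Path('/test') never matches a resolved base_dir such as Path('C:/test'). A self-contained sketch of the same pattern:

    from pathlib import Path

    def safe_relative(path: Path, base: Path):
        """Resolve before relative_to, as in _should_exclude_path above."""
        try:
            resolved = path.resolve()
        except OSError:                 # resolve() can fail on broken links etc.
            resolved = path.absolute()
        try:
            return resolved.relative_to(base)
        except ValueError:              # not under base -> caller won't exclude
            return None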
diff --git a/src/apm_cli/compilation/distributed_compiler.py b/src/apm_cli/compilation/distributed_compiler.py
index 639b192fd..686e92e6a 100644
--- a/src/apm_cli/compilation/distributed_compiler.py
+++ b/src/apm_cli/compilation/distributed_compiler.py
@@ -652,9 +652,9 @@ def _generate_orphan_warnings(self, orphaned_files: List[Path]) -> List[str]:
file_list = []
for file_path in orphaned_files[:5]: # Show first 5
rel_path = file_path.relative_to(self.base_dir)
- file_list.append(f" • {rel_path}")
+ file_list.append(f" * {rel_path}")
if len(orphaned_files) > 5:
- file_list.append(f" • ...and {len(orphaned_files) - 5} more")
+ file_list.append(f" * ...and {len(orphaned_files) - 5} more")
# Create one cohesive warning message
files_text = "\n".join(file_list)
@@ -679,20 +679,20 @@ def _cleanup_orphaned_files(self, orphaned_files: List[Path], dry_run: bool = Fa
if dry_run:
# In dry-run mode, just report what would be cleaned
- cleanup_messages.append(f"🧹 Would clean up {len(orphaned_files)} orphaned AGENTS.md files")
+ cleanup_messages.append(f"Would clean up {len(orphaned_files)} orphaned AGENTS.md files")
for file_path in orphaned_files:
rel_path = file_path.relative_to(self.base_dir)
- cleanup_messages.append(f" • {rel_path}")
+ cleanup_messages.append(f" * {rel_path}")
else:
# Actually perform the cleanup
- cleanup_messages.append(f"🧹 Cleaning up {len(orphaned_files)} orphaned AGENTS.md files")
+ cleanup_messages.append(f"Cleaning up {len(orphaned_files)} orphaned AGENTS.md files")
for file_path in orphaned_files:
try:
rel_path = file_path.relative_to(self.base_dir)
file_path.unlink()
- cleanup_messages.append(f" ✓ Removed {rel_path}")
+ cleanup_messages.append(f" + Removed {rel_path}")
except Exception as e:
- cleanup_messages.append(f" ✗ Failed to remove {rel_path}: {str(e)}")
+ cleanup_messages.append(f" x Failed to remove {rel_path}: {str(e)}")
return cleanup_messages
diff --git a/src/apm_cli/compilation/link_resolver.py b/src/apm_cli/compilation/link_resolver.py
index 2875594f8..30f58b8d4 100644
--- a/src/apm_cli/compilation/link_resolver.py
+++ b/src/apm_cli/compilation/link_resolver.py
@@ -29,7 +29,7 @@ class LinkResolutionContext:
source_location: Path # Original location (directory)
target_location: Path # Where file will live (directory or file)
base_dir: Path # Project root
- available_contexts: Dict[str, Path] # Map of context name → actual path
+ available_contexts: Dict[str, Path] # Map of context name -> actual path
class UnifiedLinkResolver:
@@ -60,8 +60,8 @@ def register_contexts(self, primitives) -> None:
"""Build registry of all available context files.
Registers contexts by:
- 1. Simple filename: "api-standards.context.md" → path
- 2. Qualified name (for dependencies): "company/standards:api.context.md" → path
+ 1. Simple filename: "api-standards.context.md" -> path
+ 2. Qualified name (for dependencies): "company/standards:api.context.md" -> path
Args:
primitives: Collection of discovered primitives (PrimitiveCollection)
@@ -259,7 +259,8 @@ def _resolve_context_link(self, link_path: str, ctx: LinkResolutionContext) -> O
# Use os.path.relpath to support ../ for paths outside target directory
try:
relative_path = os.path.relpath(actual_file, ctx.target_location)
- return relative_path
+ # Normalize to forward slashes for markdown link compatibility
+ return relative_path.replace(os.sep, '/')
except Exception:
return None
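The trailing normalization in _resolve_context_link matters because os.path.relpath emits the native separator, and backslashes are not valid separators inside markdown links. A Windows-run example (file names borrowed from the docstring above):

    import os

    rel = os.path.relpath(r"C:\proj\context\api-standards.context.md",
                          r"C:\proj\prompts")
    # rel == '..\\context\\api-standards.context.md' on Windows
    portable = rel.replace(os.sep, "/")  # '../context/api-standards.context.md'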
diff --git a/src/apm_cli/core/safe_installer.py b/src/apm_cli/core/safe_installer.py
index 6f91411c5..b897f6bd3 100644
--- a/src/apm_cli/core/safe_installer.py
+++ b/src/apm_cli/core/safe_installer.py
@@ -35,15 +35,15 @@ def has_any_changes(self) -> bool:
def log_summary(self):
"""Log a summary of installation results."""
if self.installed:
- _rich_success(f"✅ Installed: {', '.join(self.installed)}")
+ _rich_success(f"[+] Installed: {', '.join(self.installed)}")
if self.skipped:
for item in self.skipped:
- _rich_warning(f"⚠️ Skipped {item['server']}: {item['reason']}")
+ _rich_warning(f"[!] Skipped {item['server']}: {item['reason']}")
if self.failed:
for item in self.failed:
- _rich_error(f"❌ Failed {item['server']}: {item['reason']}")
+ _rich_error(f"[x] Failed {item['server']}: {item['reason']}")
class SafeMCPInstaller:
@@ -109,15 +109,15 @@ def _log_skip(self, server_ref: str):
def _log_success(self, server_ref: str):
"""Log successful server installation."""
- _rich_success(f" ✓ {server_ref}")
+ _rich_success(f" + {server_ref}")
def _log_failure(self, server_ref: str):
"""Log failed server installation."""
- _rich_warning(f" ✗ {server_ref} installation failed")
+ _rich_warning(f" x {server_ref} installation failed")
def _log_error(self, server_ref: str, error: Exception):
"""Log error during server installation."""
- _rich_error(f" ✗ {server_ref}: {error}")
+ _rich_error(f" x {server_ref}: {error}")
def check_conflicts_only(self, server_references: List[str]) -> Dict[str, Any]:
"""Check for conflicts without installing.
diff --git a/src/apm_cli/core/script_runner.py b/src/apm_cli/core/script_runner.py
index 2daa2980c..a1dee68eb 100644
--- a/src/apm_cli/core/script_runner.py
+++ b/src/apm_cli/core/script_runner.py
@@ -3,6 +3,7 @@
import os
import re
import subprocess
+import sys
import time
import yaml
from pathlib import Path
@@ -53,7 +54,7 @@ def run_script(self, script_name: str, params: Dict[str, str]) -> bool:
if not config:
if is_virtual_package:
# Create minimal config for zero-config virtual package execution
- print(f" ℹ️ Creating minimal apm.yml for zero-config execution...")
+ print(f" [i] Creating minimal apm.yml for zero-config execution...")
self._create_minimal_config()
config = self._load_config()
else:
@@ -71,7 +72,7 @@ def run_script(self, script_name: str, params: Dict[str, str]) -> bool:
if discovered_prompt:
# Print discovery message early to allow E2E tests to validate
# This message appears before runtime detection, which may fail in test environments
- print(f"ℹ Auto-discovered: {discovered_prompt}")
+ print(f"[i] Auto-discovered: {discovered_prompt}")
# Detect runtime and generate command
runtime = self._detect_installed_runtime()
@@ -82,14 +83,14 @@ def run_script(self, script_name: str, params: Dict[str, str]) -> bool:
# 2.5 Try auto-install if it looks like a virtual package reference
if self._is_virtual_package_reference(script_name):
- print(f"\n📦 Auto-installing virtual package: {script_name}")
+ print(f"\n Auto-installing virtual package: {script_name}")
if self._auto_install_virtual_package(script_name):
# Retry discovery after install
discovered_prompt = self._discover_prompt_file(script_name)
if discovered_prompt:
# Signal successful install before attempting runtime detection
# This allows E2E tests to validate auto-install without requiring runtime
- print(f"\n✨ Package installed and ready to run\n")
+ print(f"\n* Package installed and ready to run\n")
runtime = self._detect_installed_runtime()
command = self._generate_runtime_command(runtime, discovered_prompt)
return self._execute_script_command(command, params)
@@ -186,6 +187,7 @@ def _execute_script_command(self, command: str, params: Dict[str, str]) -> bool:
)
else:
# Use regular shell execution for other commands
+ # (shell=True works cross-platform: /bin/sh on Unix, cmd.exe on Windows)
result = subprocess.run(
compiled_command, shell=True, check=True, env=env
)
@@ -433,7 +435,12 @@ def _execute_runtime_command(
import shlex
# Parse the command into arguments
- args = shlex.split(command.strip())
+ if sys.platform == "win32":
+ # On Windows, use posix=False to preserve Windows quoting semantics
+ # (e.g., paths with spaces, quoted arguments like --model "gpt-4o mini")
+ args = shlex.split(command.strip(), posix=False)
+ else:
+ args = shlex.split(command.strip())
# Handle environment variables at the beginning of the command
# Extract environment variables (key=value pairs) from the beginning of args
@@ -495,8 +502,8 @@ def _discover_prompt_file(self, name: str) -> Optional[Path]:
"""Discover prompt files by name across local and dependencies.
Supports both simple names and qualified paths:
- - Simple: "code-review" → searches everywhere
- - Qualified: "github/awesome-copilot/code-review" → searches specific package
+ - Simple: "code-review" -> searches everywhere
+ - Qualified: "github/awesome-copilot/code-review" -> searches specific package
Search order for simple names:
1. Local root: ./{name}.prompt.md
@@ -542,7 +549,7 @@ def _discover_prompt_file(self, name: str) -> Optional[Path]:
matches = list(apm_modules.rglob(search_name))
# Also search for SKILL.md in directories matching the name
- # e.g., name="architecture-blueprint-generator" → find */architecture-blueprint-generator/SKILL.md
+ # e.g., name="architecture-blueprint-generator" -> find */architecture-blueprint-generator/SKILL.md
for skill_dir in apm_modules.rglob(name):
if skill_dir.is_dir():
skill_file = skill_dir / "SKILL.md"
@@ -731,7 +738,7 @@ def _auto_install_virtual_package(self, package_ref: str) -> bool:
from ..models.apm_package import DependencyReference
from ..deps.github_downloader import GitHubPackageDownloader
- # Parse the reference as-is — no extension guessing
+ # Parse the reference as-is -- no extension guessing
dep_ref = DependencyReference.parse(package_ref)
if not dep_ref.is_virtual:
@@ -746,13 +753,13 @@ def _auto_install_virtual_package(self, package_ref: str) -> bool:
# Check if already installed
if target_path.exists():
- print(f" ℹ️ Package already installed at {target_path}")
+ print(f" [i] Package already installed at {target_path}")
return True
# Download the virtual package
downloader = GitHubPackageDownloader()
- print(f" 📥 Downloading from {dep_ref.to_github_url()}")
+ print(f" Downloading from {dep_ref.to_github_url()}")
if dep_ref.is_virtual_collection():
package_info = downloader.download_virtual_collection_package(
@@ -769,7 +776,7 @@ def _auto_install_virtual_package(self, package_ref: str) -> bool:
# PackageInfo has a 'package' attribute which is an APMPackage
print(
- f" ✅ Installed {package_info.package.name} v{package_info.package.version}"
+ f" [+] Installed {package_info.package.name} v{package_info.package.version}"
)
# Update apm.yml to include this dependency
@@ -778,7 +785,7 @@ def _auto_install_virtual_package(self, package_ref: str) -> bool:
return True
except Exception as e:
- print(f" ❌ Auto-install failed: {e}")
+ print(f" [x] Auto-install failed: {e}")
return False
def _add_dependency_to_config(self, package_ref: str) -> None:
@@ -811,7 +818,7 @@ def _add_dependency_to_config(self, package_ref: str) -> None:
with open(config_path, "w") as f:
yaml.dump(config, f, default_flow_style=False, sort_keys=False)
- print(f" ℹ️ Added {package_ref} to apm.yml dependencies")
+ print(f" [i] Added {package_ref} to apm.yml dependencies")
def _create_minimal_config(self) -> None:
"""Create a minimal apm.yml for zero-config usage.
@@ -827,7 +834,7 @@ def _create_minimal_config(self) -> None:
with open("apm.yml", "w") as f:
yaml.dump(minimal_config, f, default_flow_style=False, sort_keys=False)
- print(f" ℹ️ Created minimal apm.yml for zero-config execution")
+ print(f" [i] Created minimal apm.yml for zero-config execution")
def _detect_installed_runtime(self) -> str:
"""Detect installed runtime with priority order.
diff --git a/src/apm_cli/core/target_detection.py b/src/apm_cli/core/target_detection.py
index 7737fdc8c..5f524ce46 100644
--- a/src/apm_cli/core/target_detection.py
+++ b/src/apm_cli/core/target_detection.py
@@ -8,10 +8,10 @@
1. Explicit --target flag (always wins)
2. apm.yml target setting (top-level field)
3. Auto-detect from existing folders:
- - .github/ exists AND .claude/ doesn't → copilot (internal: "vscode")
- - .claude/ exists AND .github/ doesn't → claude
- - Both exist → all
- - Neither exists → minimal (AGENTS.md only, no folder integration)
+ - .github/ exists AND .claude/ doesn't -> copilot (internal: "vscode")
+ - .claude/ exists AND .github/ doesn't -> claude
+ - Both exist -> all
+ - Neither exists -> minimal (AGENTS.md only, no folder integration)
"copilot" is the recommended user-facing target name. "vscode" and "agents"
are accepted as aliases and map to the same internal value.
diff --git a/src/apm_cli/core/token_manager.py b/src/apm_cli/core/token_manager.py
index 1cd901dde..b6cdb7e0f 100644
--- a/src/apm_cli/core/token_manager.py
+++ b/src/apm_cli/core/token_manager.py
@@ -11,7 +11,7 @@
- GITHUB_TOKEN: User-scoped PAT for GitHub Models API access
Platform Token Selection:
-- GitHub: GITHUB_APM_PAT → GITHUB_TOKEN → GH_TOKEN
+- GitHub: GITHUB_APM_PAT -> GITHUB_TOKEN -> GH_TOKEN
- Azure DevOps: ADO_APM_PAT
Runtime Requirements:
diff --git a/src/apm_cli/deps/apm_resolver.py b/src/apm_cli/deps/apm_resolver.py
index 3172042d6..717a4905c 100644
--- a/src/apm_cli/deps/apm_resolver.py
+++ b/src/apm_cli/deps/apm_resolver.py
@@ -430,6 +430,6 @@ def _create_resolution_summary(self, graph: DependencyGraph) -> str:
if summary['has_errors']:
lines.append(f" Resolution errors: {summary['error_count']}")
- lines.append(f" Status: {'✅ Valid' if summary['is_valid'] else '❌ Invalid'}")
+ lines.append(f" Status: {'[+] Valid' if summary['is_valid'] else '[x] Invalid'}")
return "\n".join(lines)
\ No newline at end of file
diff --git a/src/apm_cli/deps/github_downloader.py b/src/apm_cli/deps/github_downloader.py
index 740662a5e..26864eae7 100644
--- a/src/apm_cli/deps/github_downloader.py
+++ b/src/apm_cli/deps/github_downloader.py
@@ -185,7 +185,7 @@ def _setup_git_environment(self) -> Dict[str, Any]:
env = self.token_manager.setup_environment()
# Get tokens for modules (APM package access)
- # GitHub: GITHUB_APM_PAT → GITHUB_TOKEN
+ # GitHub: GITHUB_APM_PAT -> GITHUB_TOKEN
self.github_token = self.token_manager.get_token_for_purpose('modules', env)
self.has_github_token = self.github_token is not None
@@ -199,7 +199,7 @@ def _setup_git_environment(self) -> Dict[str, Any]:
env['GIT_TERMINAL_PROMPT'] = '0'
env['GIT_ASKPASS'] = 'echo' # Prevent interactive credential prompts
env['GIT_CONFIG_NOSYSTEM'] = '1'
- env['GIT_CONFIG_GLOBAL'] = '/dev/null'
+ env['GIT_CONFIG_GLOBAL'] = 'NUL' if sys.platform == 'win32' else '/dev/null'
return env
@@ -387,7 +387,7 @@ def _clone_with_fallback(self, repo_url_base: str, target_path: Path, progress_r
# When APM has a token for this host, use the locked-down env (APM manages auth).
# When no token is available, relax the env so git credential helpers (gh auth,
- # macOS Keychain, etc.) can provide credentials — regardless of host.
+ # macOS Keychain, etc.) can provide credentials -- regardless of host.
if has_token:
clone_env = self.git_env
else:
@@ -732,7 +732,7 @@ def _download_github_file(self, dep_ref: DependencyReference, file_path: str, re
)
elif e.response.status_code == 401 or e.response.status_code == 403:
# Token may lack SSO/SAML authorization for this org.
- # Retry without auth — the repo might be public.
+ # Retry without auth -- the repo might be public.
# Applies to github.com and GHES (custom domains can have public repos).
# Excluded: *.ghe.com (Enterprise Cloud Data Residency has no public repos).
if self.github_token and not host.lower().endswith(".ghe.com"):
@@ -829,9 +829,9 @@ def validate_virtual_package_exists(self, dep_ref: DependencyReference) -> bool:
except RuntimeError:
continue
- # Last resort: README.md — any well-formed directory should have one.
+ # Last resort: README.md -- any well-formed directory should have one.
# A directory that follows the Claude plugin spec (agents/, commands/,
- # skills/ …) with no manifest files is still a valid plugin.
+ # skills/ ...) with no manifest files is still a valid plugin.
try:
self.download_raw_file(dep_ref, f"{dep_ref.virtual_path}/README.md", ref)
return True
@@ -1518,8 +1518,8 @@ def progress_callback(op_code, cur_count, max_count=None, message=''):
"""Progress callback for Git operations."""
if max_count:
percentage = int((cur_count / max_count) * 100)
- print(f"\r🚀 Cloning: {percentage}% ({cur_count}/{max_count}) {message}", end='', flush=True)
+ print(f"\r Cloning: {percentage}% ({cur_count}/{max_count}) {message}", end='', flush=True)
else:
- print(f"\r🚀 Cloning: {message} ({cur_count})", end='', flush=True)
+ print(f"\r Cloning: {message} ({cur_count})", end='', flush=True)
return progress_callback
\ No newline at end of file
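The GIT_CONFIG_GLOBAL hunk hard-codes both null-device names; the standard library exposes the same value as os.devnull, which would avoid the sys.platform check (equivalent sketch, assuming os is imported in this module):

    import os

    env['GIT_CONFIG_GLOBAL'] = os.devnull  # 'nul' on Windows, '/dev/null' on POSIX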
diff --git a/src/apm_cli/deps/lockfile.py b/src/apm_cli/deps/lockfile.py
index 3fc2229e4..6fadffb64 100644
--- a/src/apm_cli/deps/lockfile.py
+++ b/src/apm_cli/deps/lockfile.py
@@ -70,7 +70,7 @@ def from_dict(cls, data: Dict[str, Any]) -> "LockedDependency":
"""
deployed_files = list(data.get("deployed_files", []))
- # Migrate legacy deployed_skills → deployed_files
+ # Migrate legacy deployed_skills -> deployed_files
old_skills = data.get("deployed_skills", [])
if old_skills and not deployed_files:
for skill_name in old_skills:
diff --git a/src/apm_cli/deps/plugin_parser.py b/src/apm_cli/deps/plugin_parser.py
index ce47b14cb..9f66d6789 100644
--- a/src/apm_cli/deps/plugin_parser.py
+++ b/src/apm_cli/deps/plugin_parser.py
@@ -134,10 +134,10 @@ def _extract_mcp_servers(plugin_path: Path, manifest: Dict[str, Any]) -> Dict[st
"""Extract MCP server definitions from a plugin manifest.
Resolves ``mcpServers`` by type (per Claude Code spec):
- - ``str`` → read that file path relative to plugin root, parse JSON,
+ - ``str`` -> read that file path relative to plugin root, parse JSON,
extract ``mcpServers`` key.
- - ``list`` → read each file path, merge (last-wins on name conflict).
- - ``dict`` → use directly as inline server definitions.
+ - ``list`` -> read each file path, merge (last-wins on name conflict).
+ - ``dict`` -> use directly as inline server definitions.
When ``mcpServers`` is absent and ``.mcp.json`` (or ``.github/.mcp.json``)
exists at plugin root, read it as the default (matches Claude Code
@@ -153,7 +153,7 @@ def _extract_mcp_servers(plugin_path: Path, manifest: Dict[str, Any]) -> Dict[st
manifest: Parsed plugin.json dict.
Returns:
- dict mapping server name → server config. Empty on failure.
+ dict mapping server name -> server config. Empty on failure.
"""
logger = logging.getLogger("apm")
mcp_value = manifest.get("mcpServers")
@@ -254,14 +254,14 @@ def _mcp_servers_to_apm_deps(
"""Convert raw MCP server configs to ``dependencies.mcp`` dicts.
Transport inference:
- - ``command`` present → stdio
- - ``url`` present → http (or ``type`` if it's a valid transport)
- - Neither → skipped with warning
+ - ``command`` present -> stdio
+ - ``url`` present -> http (or ``type`` if it's a valid transport)
+ - Neither -> skipped with warning
Every entry gets ``registry: false`` (self-defined, not registry lookups).
Args:
- servers: Mapping of server name → server config dict.
+ servers: Mapping of server name -> server config dict.
plugin_path: Plugin root (used for log context only).
Returns:
@@ -310,13 +310,13 @@ def _map_plugin_artifacts(plugin_path: Path, apm_dir: Path, manifest: Optional[D
"""Map plugin artifacts to .apm/ subdirectories and copy pass-through files.
Copies:
- - agents/ → .apm/agents/
- - skills/ → .apm/skills/
- - commands/ → .apm/prompts/ (*.md normalized to *.prompt.md)
- - hooks/ → .apm/hooks/ (directory, config file, or inline object)
- - .mcp.json → .apm/.mcp.json (MCP-based plugins need this to function)
- - .lsp.json → .apm/.lsp.json
- - settings.json → .apm/settings.json
+ - agents/ -> .apm/agents/
+ - skills/ -> .apm/skills/
+ - commands/ -> .apm/prompts/ (*.md normalized to *.prompt.md)
+ - hooks/ -> .apm/hooks/ (directory, config file, or inline object)
+ - .mcp.json -> .apm/.mcp.json (MCP-based plugins need this to function)
+ - .lsp.json -> .apm/.lsp.json
+ - settings.json -> .apm/settings.json
When the manifest specifies custom component paths (e.g. ``"agents": ["custom/"]``),
those paths are used instead of the defaults.
@@ -331,7 +331,7 @@ def _map_plugin_artifacts(plugin_path: Path, apm_dir: Path, manifest: Optional[D
if manifest is None:
manifest = {}
- # Resolve source paths — use manifest arrays if present, else defaults.
+ # Resolve source paths -- use manifest arrays if present, else defaults.
# Custom paths may be directories OR individual files.
def _resolve_sources(component: str, default_dir: str):
"""Return list of existing source paths (dirs or files) for a component."""
@@ -351,7 +351,7 @@ def _resolve_sources(component: str, default_dir: str):
# Map agents/
# Unlike skills (which are named directories containing SKILL.md), agents
- # are flat files — each .md is one agent. So we always merge directory
+ # are flat files -- each .md is one agent. So we always merge directory
# contents directly into .apm/agents/ (no nesting by dir name).
agent_sources = _resolve_sources("agents", "agents")
if agent_sources:
@@ -394,7 +394,7 @@ def _resolve_sources(component: str, default_dir: str):
for f in skill_files:
shutil.copy2(f, target_skills / f.name)
- # Map commands/ → .apm/prompts/ (normalize .md → .prompt.md)
+ # Map commands/ -> .apm/prompts/ (normalize .md -> .prompt.md)
command_sources = _resolve_sources("commands", "commands")
if command_sources:
target_prompts = apm_dir / "prompts"
@@ -403,7 +403,7 @@ def _resolve_sources(component: str, default_dir: str):
target_prompts.mkdir(parents=True, exist_ok=True)
def _copy_command_file(source_file: Path, dest_dir: Path, rel_to: Path = None):
- """Copy a command file, normalizing .md → .prompt.md."""
+ """Copy a command file, normalizing .md -> .prompt.md."""
if rel_to:
relative_path = source_file.relative_to(rel_to)
target_path = dest_dir / relative_path
@@ -423,11 +423,11 @@ def _copy_command_file(source_file: Path, dest_dir: Path, rel_to: Path = None):
continue
_copy_command_file(source_file, target_prompts, rel_to=source)
- # Map hooks/ — the spec allows a directory path, a config file path,
+ # Map hooks/ -- the spec allows a directory path, a config file path,
# or an inline object. Handle all three forms.
hooks_value = manifest.get("hooks")
if isinstance(hooks_value, dict):
- # Inline hooks object → write as .apm/hooks/hooks.json
+ # Inline hooks object -> write as .apm/hooks/hooks.json
target_hooks = apm_dir / "hooks"
target_hooks.mkdir(parents=True, exist_ok=True)
(target_hooks / "hooks.json").write_text(
@@ -441,7 +441,7 @@ def _copy_command_file(source_file: Path, dest_dir: Path, rel_to: Path = None):
if not src_file.is_symlink():
shutil.copy2(src_file, target_hooks / "hooks.json")
else:
- # Directory path(s) — standard flow
+ # Directory path(s) -- standard flow
hook_sources = _resolve_sources("hooks", "hooks")
if hook_sources:
target_hooks = apm_dir / "hooks"
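The commands -> prompts mapping described above normalizes plain .md command files to the .prompt.md convention. A sketch of that rename rule in isolation (hypothetical helper; the hunk implements it inside _copy_command_file):

    def _normalized_prompt_name(filename: str) -> str:
        """commands/foo.md -> foo.prompt.md; already-normalized names pass through."""
        if filename.endswith(".prompt.md") or not filename.endswith(".md"):
            return filename
        return filename[:-3] + ".prompt.md"

    _normalized_prompt_name("review.md")         # 'review.prompt.md'
    _normalized_prompt_name("review.prompt.md")  # unchanged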
diff --git a/src/apm_cli/integration/agent_integrator.py b/src/apm_cli/integration/agent_integrator.py
index 212915ecb..7cedf1245 100644
--- a/src/apm_cli/integration/agent_integrator.py
+++ b/src/apm_cli/integration/agent_integrator.py
@@ -42,7 +42,7 @@ def find_agent_files(self, package_path: Path) -> List[Path]:
if apm_agents.exists():
agent_files.extend(apm_agents.rglob("*.agent.md"))
# Also pick up plain .md files in agents/; plugins may not use
- # the .agent.md convention — the directory name already implies type
+ # the .agent.md convention -- the directory name already implies type
for md_file in apm_agents.rglob("*.md"):
if (
not md_file.name.endswith(".agent.md")
diff --git a/src/apm_cli/integration/base_integrator.py b/src/apm_cli/integration/base_integrator.py
index 0d98f92b8..fcc7f5b0e 100644
--- a/src/apm_cli/integration/base_integrator.py
+++ b/src/apm_cli/integration/base_integrator.py
@@ -34,7 +34,7 @@ def __init__(self):
self.link_resolver: Optional[UnifiedLinkResolver] = None
# ------------------------------------------------------------------
- # Common behaviour — subclasses inherit directly
+ # Common behaviour -- subclasses inherit directly
# ------------------------------------------------------------------
def should_integrate(self, project_root: Path) -> bool: # noqa: ARG002
@@ -58,7 +58,7 @@ def check_collision(
A collision exists when **all** of these are true:
1. ``managed_files`` is not ``None`` (manifest mode)
2. ``target_path`` already exists on disk
- 3. ``rel_path`` is **not** in the managed set (→ user-authored)
+ 3. ``rel_path`` is **not** in the managed set (-> user-authored)
4. ``force`` is ``False``
When *diagnostics* is provided the skip is recorded there;
@@ -71,7 +71,7 @@ def check_collision(
return False
if not target_path.exists():
return False
- # managed_files is pre-normalized at the call site — O(1) lookup
+ # managed_files is pre-normalized at the call site -- O(1) lookup
if rel_path.replace("\\", "/") in managed_files:
return False
if force:
@@ -165,8 +165,8 @@ def cleanup_empty_parents(
"""Remove empty parent directories in a single bottom-up pass.
Collects all parent directories of *deleted_paths*, sorts by
- depth descending, and removes each if empty — O(H+D) syscalls
- instead of the per-file O(H×D) approach.
+ depth descending, and removes each if empty -- O(H+D) syscalls
+ instead of the per-file O(H*D) approach.
Args:
deleted_paths: Paths that were deleted (files or dirs).
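The bottom-up pass amounts to sorting unique ancestors by depth and attempting one `rmdir` each; a sketch assuming absolute, resolved paths:

from pathlib import Path

def cleanup_empty_parents_sketch(deleted_paths, stop_at: Path) -> None:
    # Collect each ancestor strictly below stop_at exactly once, then try
    # deepest-first so children empty out before their parents are checked.
    parents = {p for path in deleted_paths for p in path.parents
               if stop_at in p.parents}
    for directory in sorted(parents, key=lambda p: len(p.parts), reverse=True):
        try:
            directory.rmdir()  # succeeds only when the directory is empty
        except OSError:
            pass               # non-empty or already removed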
@@ -257,7 +257,7 @@ def sync_remove_files(
if managed_files is not None:
for rel_path in managed_files:
- # managed_files is pre-normalized — no .replace() needed
+ # managed_files is pre-normalized -- no .replace() needed
if not rel_path.startswith(prefix):
continue
if not BaseIntegrator.validate_deploy_path(rel_path, project_root):
diff --git a/src/apm_cli/integration/hook_integrator.py b/src/apm_cli/integration/hook_integrator.py
index 35a07a343..fe3ee4061 100644
--- a/src/apm_cli/integration/hook_integrator.py
+++ b/src/apm_cli/integration/hook_integrator.py
@@ -4,7 +4,7 @@
installation. Supports both VSCode Copilot (.github/hooks/) and Claude Code
(.claude/settings.json) targets.
-Hook JSON format (Claude Code — nested matcher groups):
+Hook JSON format (Claude Code -- nested matcher groups):
{
"hooks": {
"PreToolUse": [
@@ -17,7 +17,7 @@
}
}
-Hook JSON format (GitHub Copilot — flat arrays with bash/powershell keys):
+Hook JSON format (GitHub Copilot -- flat arrays with bash/powershell keys):
{
"version": 1,
"hooks": {
@@ -28,9 +28,9 @@
}
Script path handling:
- - ${CLAUDE_PLUGIN_ROOT}/path → resolved relative to package root, rewritten for target
- - ./path → relative path, resolved from hook file's parent directory, rewritten for target
- - System commands (no path separators) → passed through unchanged
+ - ${CLAUDE_PLUGIN_ROOT}/path -> resolved relative to package root, rewritten for target
+ - ./path -> relative path, resolved from hook file's parent directory, rewritten for target
+ - System commands (no path separators) -> passed through unchanged
"""
import json
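The three script-path rules above amount to a small resolver; a sketch (the function name and the target-rewrite details are illustrative):

from pathlib import Path

def resolve_hook_command(cmd: str, package_root: Path, hook_dir: Path) -> str:
    if cmd.startswith("${CLAUDE_PLUGIN_ROOT}/"):
        # Plugin-root-relative: resolve against the package root
        return str(package_root / cmd.removeprefix("${CLAUDE_PLUGIN_ROOT}/"))
    if cmd.startswith("./"):
        # Relative: resolve from the hook file's parent directory
        return str(hook_dir / cmd[2:])
    # Bare system command (no path separators): pass through unchanged
    return cmd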
@@ -418,7 +418,7 @@ def integrate_package_hooks_claude(self, package_info, project_root: Path,
with open(settings_path, 'w', encoding='utf-8') as f:
json.dump(settings, f, indent=2)
f.write('\n')
- # Don't track settings.json in target_paths — it's a shared file
+ # Don't track settings.json in target_paths -- it's a shared file
# cleaned via _apm_source markers, not file-level deletion
return HookIntegrationResult(
@@ -443,13 +443,15 @@ def sync_integration(self, apm_package, project_root: Path,
stats: Dict[str, int] = {'files_removed': 0, 'errors': 0}
if managed_files is not None:
- # Manifest-based removal — only remove tracked files
+ # Manifest-based removal -- only remove tracked files
deleted: list = []
for rel_path in managed_files:
+ # Normalize path separators for cross-platform compatibility
+ normalized = rel_path.replace("\\", "/")
# Only handle hook-related paths
is_hook = (
- rel_path.startswith(".github/hooks/")
- or rel_path.startswith(".claude/hooks/")
+ normalized.startswith(".github/hooks/")
+ or normalized.startswith(".claude/hooks/")
)
if not is_hook or ".." in rel_path:
continue
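The normalization matters because lockfiles written on Windows may record backslash separators; the hook filter above is equivalent to roughly:

def is_managed_hook(rel_path: str) -> bool:
    # Normalize before the prefix test so Windows-written entries match
    normalized = rel_path.replace("\\", "/")
    return (normalized.startswith((".github/hooks/", ".claude/hooks/"))
            and ".." not in normalized)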
@@ -461,10 +463,10 @@ def sync_integration(self, apm_package, project_root: Path,
deleted.append(target)
except Exception:
stats['errors'] += 1
- # Batch parent cleanup — single bottom-up pass
+ # Batch parent cleanup -- single bottom-up pass
self.cleanup_empty_parents(deleted, stop_at=project_root)
else:
- # Legacy fallback — glob for old -apm suffix files
+ # Legacy fallback -- glob for old -apm suffix files
hooks_dir = project_root / ".github" / "hooks"
if hooks_dir.exists():
for hook_file in hooks_dir.glob("*-apm.json"):
@@ -474,7 +476,7 @@ def sync_integration(self, apm_package, project_root: Path,
except Exception:
stats['errors'] += 1
- # Clean APM entries from .claude/settings.json (safe — uses _apm_source marker)
+ # Clean APM entries from .claude/settings.json (safe -- uses _apm_source marker)
settings_path = project_root / ".claude" / "settings.json"
if settings_path.exists():
try:
diff --git a/src/apm_cli/integration/mcp_integrator.py b/src/apm_cli/integration/mcp_integrator.py
index f30c4c0dc..787faf80f 100644
--- a/src/apm_cli/integration/mcp_integrator.py
+++ b/src/apm_cli/integration/mcp_integrator.py
@@ -1,7 +1,7 @@
"""Standalone MCP lifecycle orchestrator.
Owns all MCP dependency resolution, installation, stale cleanup, and lockfile
-persistence logic. This is NOT a BaseIntegrator subclass — MCP integration is
+persistence logic. This is NOT a BaseIntegrator subclass -- MCP integration is
config-level orchestration (registry APIs, runtime configs, lockfile tracking),
not file-level deployment (copy/collision/sync).
@@ -32,7 +32,7 @@
class MCPIntegrator:
- """MCP lifecycle orchestrator — dependency resolution, installation, and cleanup.
+ """MCP lifecycle orchestrator -- dependency resolution, installation, and cleanup.
All methods are static: the class is a logical namespace, not a stateful
object. This keeps the extraction minimal and preserves the original
@@ -242,11 +242,11 @@ def _apply_overlay(server_info_cache: dict, dep) -> None:
# Transport overlay: select matching transport from available options
if dep.transport:
if dep.transport in ("http", "sse", "streamable-http"):
- # User prefers remote transport — remove packages to force remote path
+ # User prefers remote transport -- remove packages to force remote path
if "remotes" in info and info["remotes"]:
info.pop("packages", None)
elif dep.transport == "stdio":
- # User prefers stdio — remove remotes to force package path
+ # User prefers stdio -- remove remotes to force package path
if "packages" in info and info["packages"]:
info.pop("remotes", None)
@@ -458,7 +458,7 @@ def remove_stale(
)
for name in removed:
_rich_info(
- f"✓ Removed stale MCP server '{name}' from .vscode/mcp.json"
+ f"+ Removed stale MCP server '{name}' from .vscode/mcp.json"
)
except Exception:
logger.debug(
@@ -484,7 +484,7 @@ def remove_stale(
)
for name in removed:
_rich_info(
- f"✓ Removed stale MCP server '{name}' from Copilot CLI config"
+ f"+ Removed stale MCP server '{name}' from Copilot CLI config"
)
except Exception:
logger.debug(
@@ -508,7 +508,7 @@ def remove_stale(
codex_cfg.write_text(_toml.dumps(config), encoding="utf-8")
for name in removed:
_rich_info(
- f"✓ Removed stale MCP server '{name}' from Codex CLI config"
+ f"+ Removed stale MCP server '{name}' from Codex CLI config"
)
except Exception:
logger.debug(
@@ -646,7 +646,7 @@ def _install_for_runtime(
shared_runtime_vars=shared_runtime_vars,
)
if result["failed"]:
- click.echo(f" ✗ Failed to install {dep}")
+ click.echo(f" x Failed to install {dep}")
all_ok = False
except Exception as install_error:
logger.debug(
@@ -655,7 +655,7 @@ def _install_for_runtime(
runtime,
exc_info=True,
)
- click.echo(f" ✗ Failed to install {dep}: {install_error}")
+ click.echo(f" x Failed to install {dep}: {install_error}")
all_ok = False
return all_ok
@@ -743,7 +743,7 @@ def install(
from rich.text import Text
header = Text()
- header.append("┌─ MCP Servers (", style="cyan")
+ header.append("+- MCP Servers (", style="cyan")
header.append(str(len(mcp_deps)), style="cyan bold")
header.append(")", style="cyan")
console.print(header)
@@ -759,7 +759,7 @@ def install(
_rich_info(f"Targeting specific runtime: {runtime}")
else:
if apm_config is None:
- # Lazy load — only when the caller doesn't provide it
+ # Lazy load -- only when the caller doesn't provide it
try:
import yaml
@@ -810,19 +810,19 @@ def install(
if verbose:
if console:
- console.print("│ [cyan]ℹ️ Runtime Detection[/cyan]")
+ console.print("| [cyan][i] Runtime Detection[/cyan]")
console.print(
- f"│ └─ Installed: {', '.join(installed_runtimes)}"
+ f"| +- Installed: {', '.join(installed_runtimes)}"
)
console.print(
- f"│ └─ Used in scripts: {', '.join(script_runtimes)}"
+ f"| +- Used in scripts: {', '.join(script_runtimes)}"
)
if target_runtimes:
console.print(
- f"│ └─ Target: {', '.join(target_runtimes)} "
+ f"| +- Target: {', '.join(target_runtimes)} "
f"(available + used in scripts)"
)
- console.print("│")
+ console.print("|")
else:
_rich_info(
f"Installed runtimes: {', '.join(installed_runtimes)}"
@@ -858,7 +858,7 @@ def install(
if exclude:
target_runtimes = [r for r in target_runtimes if r != exclude]
- # All runtimes excluded — nothing to configure
+ # All runtimes excluded -- nothing to configure
if not target_runtimes and installed_runtimes:
_rich_warning(
f"All installed runtimes excluded (--exclude {exclude}), "
@@ -936,7 +936,7 @@ def install(
if console:
for dep in already_configured_servers:
console.print(
- f"│ [green]✓[/green] {dep} "
+ f"| [green]+[/green] {dep} "
f"[dim](already configured)[/dim]"
)
else:
@@ -948,7 +948,7 @@ def install(
if console:
for dep in already_configured_servers:
console.print(
- f"│ [green]✓[/green] {dep} "
+ f"| [green]+[/green] {dep} "
f"[dim](already configured)[/dim]"
)
elif verbose:
@@ -994,10 +994,10 @@ def install(
for dep in servers_to_install:
is_update = dep in servers_to_update
if console:
- action = "↻" if is_update else "⬇️"
- console.print(f"│ [cyan]{action}[/cyan] {dep}")
+ action_text = "Updating" if is_update else "Configuring"
+ console.print(f"| [cyan]>[/cyan] {dep}")
console.print(
- f"│ └─ {'Updating' if is_update else 'Configuring'} for "
+ f"| +- {action_text} for "
f"{', '.join([rt.title() for rt in target_runtimes])}..."
)
@@ -1018,7 +1018,7 @@ def install(
if console:
label = "updated" if is_update else "configured"
console.print(
- f"│ [green]✓[/green] {dep} → "
+ f"| [green]+[/green] {dep} -> "
f"{', '.join([rt.title() for rt in target_runtimes])}"
f" [dim]({label})[/dim]"
)
@@ -1027,7 +1027,7 @@ def install(
successful_updates.add(dep)
elif console:
console.print(
- f"│ [red]✗[/red] {dep} — "
+ f"| [red]x[/red] {dep} -- "
f"failed for all runtimes"
)
@@ -1077,7 +1077,7 @@ def install(
if console:
for name in already_configured_self_defined:
console.print(
- f"│ [green]✓[/green] {name} "
+ f"| [green]+[/green] {name} "
f"[dim](already configured)[/dim]"
)
elif verbose:
@@ -1101,13 +1101,13 @@ def install(
if console:
transport_label = dep.transport or "stdio"
- action = "↻" if is_update else "⬇️"
+ action_text = "Updating" if is_update else "Configuring"
console.print(
- f"│ [cyan]{action}[/cyan] {dep.name} "
+ f"| [cyan]>[/cyan] {dep.name} "
f"[dim](self-defined, {transport_label})[/dim]"
)
console.print(
- f"│ └─ {'Updating' if is_update else 'Configuring'} for "
+ f"| +- {action_text} for "
f"{', '.join([rt.title() for rt in target_runtimes])}..."
)
@@ -1127,7 +1127,7 @@ def install(
if console:
label = "updated" if is_update else "configured"
console.print(
- f"│ [green]✓[/green] {dep.name} → "
+ f"| [green]+[/green] {dep.name} -> "
f"{', '.join([rt.title() for rt in target_runtimes])}"
f" [dim]({label})[/dim]"
)
@@ -1136,7 +1136,7 @@ def install(
successful_updates.add(dep.name)
elif console:
console.print(
- f"│ [red]✗[/red] {dep.name} — "
+ f"| [red]x[/red] {dep.name} -- "
f"failed for all runtimes"
)
@@ -1159,8 +1159,8 @@ def install(
f"updated {update_count} "
f"server{'s' if update_count != 1 else ''}"
)
- console.print(f"└─ [green]{', '.join(parts).capitalize()}[/green]")
+ console.print(f"+- [green]{', '.join(parts).capitalize()}[/green]")
else:
- console.print("└─ [green]All servers up to date[/green]")
+ console.print("+- [green]All servers up to date[/green]")
return configured_count
diff --git a/src/apm_cli/integration/prompt_integrator.py b/src/apm_cli/integration/prompt_integrator.py
index 03f4b8d8d..a4440f382 100644
--- a/src/apm_cli/integration/prompt_integrator.py
+++ b/src/apm_cli/integration/prompt_integrator.py
@@ -60,7 +60,7 @@ def get_target_filename(self, source_file: Path, package_name: str) -> str:
Returns:
str: Target filename (e.g., accessibility-audit.prompt.md)
"""
- # Use original filename — no -apm suffix
+ # Use original filename -- no -apm suffix
return source_file.name
diff --git a/src/apm_cli/integration/skill_integrator.py b/src/apm_cli/integration/skill_integrator.py
index 3e11fe3b1..621649ba7 100644
--- a/src/apm_cli/integration/skill_integrator.py
+++ b/src/apm_cli/integration/skill_integrator.py
@@ -153,15 +153,15 @@ def normalize_skill_name(name: str) -> str:
# 3. Default to INSTRUCTIONS for instruction-only packages
#
# Per skill-strategy.md Decision 2: "Skills are explicit, not implicit"
-# - Packages with SKILL.md OR explicit type: skill/hybrid → become skills
-# - Packages with only instructions → compile to AGENTS.md, NOT skills
+# - Packages with SKILL.md OR explicit type: skill/hybrid -> become skills
+# - Packages with only instructions -> compile to AGENTS.md, NOT skills
def get_effective_type(package_info) -> "PackageContentType":
"""Get effective package content type based on package structure.
Determines type by:
- 1. Package has SKILL.md (PackageType.CLAUDE_SKILL or HYBRID) → SKILL
- 2. Otherwise → INSTRUCTIONS (compile to AGENTS.md only)
+ 1. Package has SKILL.md (PackageType.CLAUDE_SKILL or HYBRID) -> SKILL
+ 2. Otherwise -> INSTRUCTIONS (compile to AGENTS.md only)
Args:
package_info: PackageInfo object containing package metadata
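Stripped of the `PackageInfo` plumbing, the routing rule is a two-way branch; a self-contained sketch (the enum shape here is assumed):

from enum import Enum
from typing import Optional

class ContentTypeSketch(Enum):
    SKILL = "skill"
    INSTRUCTIONS = "instructions"

def effective_type_sketch(has_root_skill_md: bool, declared: Optional[str]) -> ContentTypeSketch:
    # Skills are explicit, not implicit: only SKILL.md or a declared
    # skill/hybrid type produces a skill; instructions never auto-promote.
    if has_root_skill_md or declared in ("skill", "hybrid"):
        return ContentTypeSketch.SKILL
    return ContentTypeSketch.INSTRUCTIONS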
@@ -256,7 +256,7 @@ def copy_skill_to_target(
- Directory structure preservation
- Compatibility copy to .claude/skills/ when .claude/ exists (T7)
- Source SKILL.md is copied verbatim — no metadata injection.
+ Source SKILL.md is copied verbatim -- no metadata injection.
Copies:
- SKILL.md (required)
@@ -444,7 +444,7 @@ def _promote_sub_skills(sub_skills_dir: Path, target_skills_root: Path, parent_n
target_skills_root: Root skills directory (e.g. .github/skills/ or .claude/skills/).
parent_name: Name of the parent skill (used in warning messages).
warn: Whether to emit a warning on name collisions.
- owned_by: Map of skill_name → owner_package_name from the lockfile.
+ owned_by: Map of skill_name -> owner_package_name from the lockfile.
When provided, warnings are suppressed for self-overwrites.
diagnostics: Optional DiagnosticCollector for deferred warning output.
@@ -491,7 +491,7 @@ def _promote_sub_skills(sub_skills_dir: Path, target_skills_root: Path, parent_n
@staticmethod
def _build_skill_ownership_map(project_root: Path) -> dict[str, str]:
- """Build a map of skill_name → owner_package_name from the lockfile.
+ """Build a map of skill_name -> owner_package_name from the lockfile.
Used to distinguish self-overwrites (no warning) from cross-package
conflicts (warning) when promoting sub-skills.
@@ -505,7 +505,7 @@ def _build_skill_ownership_map(project_root: Path) -> dict[str, str]:
for dep in lockfile.get_all_dependencies():
owner = (dep.virtual_path or dep.repo_url).rsplit("/", 1)[-1]
for deployed_path in dep.deployed_files:
- # e.g. ".github/skills/context-map" → "context-map"
+ # e.g. ".github/skills/context-map" -> "context-map"
skill_name = deployed_path.rstrip("/").rsplit("/", 1)[-1]
owned_by[skill_name] = owner
return owned_by
@@ -565,7 +565,7 @@ def _integrate_native_skill(
The skill folder name is the source folder name (e.g., `mcp-builder`),
validated and normalized per the agentskills.io spec.
- Source SKILL.md is copied verbatim — no metadata injection. Orphan
+ Source SKILL.md is copied verbatim -- no metadata injection. Orphan
detection uses apm.lock via directory name matching instead.
T7 Enhancement: Also copies to .claude/skills/ when .claude/ folder exists.
@@ -590,7 +590,7 @@ def _integrate_native_skill(
package_path = package_info.install_path
# Use the source folder name as the skill name
- # e.g., apm_modules/ComposioHQ/awesome-claude-skills/mcp-builder → mcp-builder
+ # e.g., apm_modules/ComposioHQ/awesome-claude-skills/mcp-builder -> mcp-builder
raw_skill_name = package_path.name
# Validate skill name per agentskills.io spec
@@ -618,7 +618,7 @@ def _integrate_native_skill(
github_skill_dir = project_root / ".github" / "skills" / skill_name
github_skill_md = github_skill_dir / "SKILL.md"
- # Always copy — source integrity is preserved, orphan detection uses apm.lock
+ # Always copy -- source integrity is preserved, orphan detection uses apm.lock
skill_created = not github_skill_dir.exists()
skill_updated = not skill_created
@@ -683,7 +683,7 @@ def integrate_package_skill(self, package_info, project_root: Path, diagnostics=
Copies native skills (packages with SKILL.md at root) to .github/skills/
and optionally .claude/skills/. Also promotes any sub-skills from .apm/skills/.
- Packages without SKILL.md at root are not installed as skills — only their
+ Packages without SKILL.md at root are not installed as skills -- only their
sub-skills (if any) are promoted.
Args:
@@ -694,8 +694,8 @@ def integrate_package_skill(self, package_info, project_root: Path, diagnostics=
SkillIntegrationResult: Results of the integration operation
"""
# Check if package type allows skill installation (T4 routing)
- # SKILL and HYBRID → install as skill
- # INSTRUCTIONS and PROMPTS → skip skill installation
+ # SKILL and HYBRID -> install as skill
+ # INSTRUCTIONS and PROMPTS -> skip skill installation
if not should_install_skill(package_info):
# Even non-skill packages may ship sub-skills under .apm/skills/.
# Promote them so Copilot can discover them independently.
@@ -735,7 +735,7 @@ def integrate_package_skill(self, package_info, project_root: Path, diagnostics=
if source_skill_md.exists():
return self._integrate_native_skill(package_info, project_root, source_skill_md, diagnostics=diagnostics)
- # No SKILL.md at root — not a skill package.
+ # No SKILL.md at root -- not a skill package.
# Still promote any sub-skills shipped under .apm/skills/.
sub_skills_count, sub_deployed = self._promote_sub_skills_standalone(
package_info, project_root, diagnostics=diagnostics
@@ -770,7 +770,7 @@ def sync_integration(self, apm_package, project_root: Path,
stats = {'files_removed': 0, 'errors': 0}
if managed_files is not None:
- # Manifest-based removal — only remove tracked skill directories
+ # Manifest-based removal -- only remove tracked skill directories
project_root_resolved = project_root.resolve()
for rel_path in managed_files:
is_skill = (
diff --git a/src/apm_cli/integration/skill_transformer.py b/src/apm_cli/integration/skill_transformer.py
index 025aaf0d6..cb21aed1f 100644
--- a/src/apm_cli/integration/skill_transformer.py
+++ b/src/apm_cli/integration/skill_transformer.py
@@ -37,12 +37,12 @@ def to_hyphen_case(name: str) -> str:
class SkillTransformer:
"""Transforms SKILL.md to platform-native formats.
- For VSCode: SKILL.md → .github/agents/{name}.agent.md
+ For VSCode: SKILL.md -> .github/agents/{name}.agent.md
For Claude: SKILL.md stays as-is (native format)
"""
def transform_to_agent(self, skill: Skill, output_dir: Path, dry_run: bool = False) -> Optional[Path]:
- """Transform SKILL.md → .github/agents/{name}.agent.md for VSCode.
+ """Transform SKILL.md -> .github/agents/{name}.agent.md for VSCode.
Note: Only creates the .agent.md file. Bundled resources stay in apm_modules/.
diff --git a/src/apm_cli/models/dependency.py b/src/apm_cli/models/dependency.py
index 6eeba1fdd..87f0f7f6d 100644
--- a/src/apm_cli/models/dependency.py
+++ b/src/apm_cli/models/dependency.py
@@ -84,9 +84,9 @@ def is_virtual_subdirectory(self) -> bool:
- Is a directory path (likely containing SKILL.md or apm.yml)
Examples:
- - ComposioHQ/awesome-claude-skills/brand-guidelines → True
- - owner/repo/prompts/file.prompt.md → False (is_virtual_file)
- - owner/repo/collections/name → False (is_virtual_collection)
+ - ComposioHQ/awesome-claude-skills/brand-guidelines -> True
+ - owner/repo/prompts/file.prompt.md -> False (is_virtual_file)
+ - owner/repo/collections/name -> False (is_virtual_collection)
"""
if not self.is_virtual or not self.virtual_path:
return False
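A rough classifier for the three cases, assuming `virtual_path` is the path after `owner/repo` and using an illustrative subset of the extension list:

def classify_virtual_path(virtual_path: str) -> str:
    file_extensions = (".prompt.md", ".md")  # illustrative subset
    leaf = virtual_path.rstrip("/").rsplit("/", 1)[-1]
    if any(leaf.endswith(ext) for ext in file_extensions):
        return "virtual_file"
    if virtual_path.split("/", 1)[0] == "collections":
        return "virtual_collection"
    return "virtual_subdirectory"  # directory holding SKILL.md or apm.yml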
@@ -97,9 +97,9 @@ def get_virtual_package_name(self) -> str:
"""Generate a package name for this virtual package.
For virtual packages, we create a sanitized name from the path:
- - owner/repo/prompts/code-review.prompt.md → repo-code-review
- - owner/repo/collections/project-planning → repo-project-planning
- - owner/repo/collections/project-planning.collection.yml → repo-project-planning
+ - owner/repo/prompts/code-review.prompt.md -> repo-code-review
+ - owner/repo/collections/project-planning -> repo-project-planning
+ - owner/repo/collections/project-planning.collection.yml -> repo-project-planning
"""
if not self.is_virtual or not self.virtual_path:
return self.repo_url.split('/')[-1] # Return repo name as fallback
@@ -112,8 +112,8 @@ def get_virtual_package_name(self) -> str:
path_parts = self.virtual_path.split('/')
if self.is_virtual_collection():
# For collections: use the collection name without extension
- # collections/project-planning → project-planning
- # collections/project-planning.collection.yml → project-planning
+ # collections/project-planning -> project-planning
+ # collections/project-planning.collection.yml -> project-planning
collection_name = path_parts[-1]
# Strip .collection.yml/.collection.yaml extension if present
for ext in ('.collection.yml', '.collection.yaml'):
@@ -123,7 +123,7 @@ def get_virtual_package_name(self) -> str:
return f"{repo_name}-{collection_name}"
else:
# For individual files: use the filename without extension
- # prompts/code-review.prompt.md → code-review
+ # prompts/code-review.prompt.md -> code-review
filename = path_parts[-1]
for ext in self.VIRTUAL_FILE_EXTENSIONS:
if filename.endswith(ext):
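The naming rules reduce to "repo name, hyphen, leaf with known extensions stripped"; a compact sketch reproducing the docstring examples:

def virtual_package_name(repo_name: str, virtual_path: str) -> str:
    leaf = virtual_path.rstrip("/").rsplit("/", 1)[-1]
    for ext in (".collection.yml", ".collection.yaml", ".prompt.md", ".md"):
        if leaf.endswith(ext):
            leaf = leaf[: -len(ext)]
            break
    return f"{repo_name}-{leaf}"

# virtual_package_name("repo", "prompts/code-review.prompt.md") -> "repo-code-review"
# virtual_package_name("repo", "collections/project-planning")  -> "repo-project-planning"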
@@ -148,13 +148,13 @@ def to_canonical(self) -> str:
"""Return the canonical form of this dependency for storage in apm.yml.
Follows the Docker-style default-registry convention:
- - Default host (github.com) is stripped → owner/repo
- - Non-default hosts are preserved → gitlab.com/owner/repo
- - Virtual paths are appended → owner/repo/path/to/thing
- - Refs are appended with # → owner/repo#v1.0
- - Aliases are appended with @ → owner/repo@my-alias
+ - Default host (github.com) is stripped -> owner/repo
+ - Non-default hosts are preserved -> gitlab.com/owner/repo
+ - Virtual paths are appended -> owner/repo/path/to/thing
+ - Refs are appended with # -> owner/repo#v1.0
+ - Aliases are appended with @ -> owner/repo@my-alias
- No .git suffix, no https://, no git@ — just the canonical identifier.
+ No .git suffix, no https://, no git@ -- just the canonical identifier.
Returns:
str: Canonical dependency string
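Assembly of the canonical form follows directly from those rules; a sketch over pre-parsed fields (the real method works from the parsed dependency, not loose arguments):

from typing import Optional

def to_canonical_sketch(host: str, owner_repo: str,
                        virtual_path: Optional[str] = None,
                        ref: Optional[str] = None,
                        alias: Optional[str] = None) -> str:
    # Docker-style default registry: github.com is implied, so it is stripped
    s = owner_repo if host == "github.com" else f"{host}/{owner_repo}"
    if virtual_path:
        s += f"/{virtual_path}"
    if ref:
        s += f"#{ref}"
    if alias:
        s += f"@{alias}"
    return s

# to_canonical_sketch("github.com", "owner/repo", ref="v1.0") -> "owner/repo#v1.0"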
@@ -221,7 +221,7 @@ def canonicalize(raw: str) -> str:
def get_canonical_dependency_string(self) -> str:
"""Get the host-blind canonical string for filesystem and orphan-detection matching.
- This returns repo_url (+ virtual_path) without host prefix — it matches
+ This returns repo_url (+ virtual_path) without host prefix -- it matches
the filesystem layout in apm_modules/ which is also host-blind.
For identity-based matching that includes non-default hosts, use get_identity().
@@ -293,8 +293,8 @@ def _normalize_ssh_protocol_url(url: str) -> str:
"""Normalize ssh:// protocol URLs to git@ format for consistent parsing.
Converts:
- - ssh://git@gitlab.com/owner/repo.git → git@gitlab.com:owner/repo.git
- - ssh://git@host:port/owner/repo.git → git@host:owner/repo.git
+ - ssh://git@gitlab.com/owner/repo.git -> git@gitlab.com:owner/repo.git
+ - ssh://git@host:port/owner/repo.git -> git@host:owner/repo.git
Non-SSH URLs are returned unchanged.
"""
@@ -754,7 +754,7 @@ def parse(cls, dependency_str: str) -> "DependencyReference":
else:
if len(path_parts) < 2:
raise ValueError(f"Invalid repository path: expected at least 'user/repo', got '{path}'")
- # HTTPS URLs cannot embed virtual paths — reject virtual file extensions
+ # HTTPS URLs cannot embed virtual paths -- reject virtual file extensions
for pp in path_parts:
if any(pp.endswith(ext) for ext in cls.VIRTUAL_FILE_EXTENSIONS):
raise ValueError(
@@ -796,7 +796,7 @@ def parse(cls, dependency_str: str) -> "DependencyReference":
raise ValueError(f"Invalid repository format: {repo_url}. Expected 'user/repo'")
if not all(re.match(r'^[a-zA-Z0-9._-]+$', s) for s in segments):
raise ValueError(f"Invalid repository format: {repo_url}. Contains invalid characters")
- # SSH/HTTPS URLs cannot embed virtual paths — reject virtual file extensions
+ # SSH/HTTPS URLs cannot embed virtual paths -- reject virtual file extensions
for seg in segments:
if any(seg.endswith(ext) for ext in cls.VIRTUAL_FILE_EXTENSIONS):
raise ValueError(
@@ -881,7 +881,7 @@ class MCPDependency:
args: Optional[Any] = None # Dict for overlay variable overrides, List for self-defined positional args
version: Optional[str] = None # Pin specific server version
registry: Optional[Any] = None # None=default, False=self-defined, str=custom registry URL
- package: Optional[str] = None # "npm" | "pypi" | "oci" — select package type
+ package: Optional[str] = None # "npm" | "pypi" | "oci" -- select package type
headers: Optional[Dict[str, str]] = None # Custom HTTP headers for remote endpoints
tools: Optional[List[str]] = None # Restrict exposed tools (default is ["*"])
url: Optional[str] = None # Required for self-defined http/sse transports
diff --git a/src/apm_cli/models/validation.py b/src/apm_cli/models/validation.py
index 673282874..d552f687e 100644
--- a/src/apm_cli/models/validation.py
+++ b/src/apm_cli/models/validation.py
@@ -118,11 +118,11 @@ def has_issues(self) -> bool:
def summary(self) -> str:
"""Get a summary of validation results."""
if self.is_valid and not self.warnings:
- return "✅ Package is valid"
+ return "[+] Package is valid"
elif self.is_valid and self.warnings:
- return f"⚠️ Package is valid with {len(self.warnings)} warning(s)"
+ return f"[!] Package is valid with {len(self.warnings)} warning(s)"
else:
- return f"❌ Package is invalid with {len(self.errors)} error(s)"
+ return f"[x] Package is invalid with {len(self.errors)} error(s)"
def _has_hook_json(package_path: Path) -> bool:
@@ -164,7 +164,7 @@ def validate_apm_package(package_path: Path) -> ValidationResult:
apm_yml_path = package_path / "apm.yml"
skill_md_path = package_path / "SKILL.md"
- # Check for plugin.json — optional metadata, not a detection gate
+ # Check for plugin.json -- optional metadata, not a detection gate
from ..utils.helpers import find_plugin_json
plugin_json_path = find_plugin_json(package_path)
@@ -290,7 +290,7 @@ def _validate_marketplace_plugin(package_path: Path, plugin_json_path: Optional[
"""Validate a Claude plugin and synthesize apm.yml.
plugin.json is **optional** per the spec. When present it provides
- metadata (name, version, description …). When absent the plugin name is
+ metadata (name, version, description ...). When absent the plugin name is
derived from the directory name and all other fields default gracefully.
Args:
diff --git a/src/apm_cli/output/formatters.py b/src/apm_cli/output/formatters.py
index a6137722a..556b5f7f3 100644
--- a/src/apm_cli/output/formatters.py
+++ b/src/apm_cli/output/formatters.py
@@ -128,7 +128,7 @@ def _format_final_summary(self, results: CompilationResults) -> List[str]:
# Build metrics with baselines and improvements when available
metrics_lines = [
- f"┌─ Context efficiency: {efficiency_pct}"
+ f"+- Context efficiency: {efficiency_pct}"
]
if stats.efficiency_improvement is not None:
@@ -138,18 +138,18 @@ def _format_final_summary(self, results: CompilationResults) -> List[str]:
if stats.pollution_improvement is not None:
pollution_pct = f"{(1.0 - stats.pollution_improvement) * 100:.1f}%"
improvement_pct = f"-{stats.pollution_improvement * 100:.0f}%" if stats.pollution_improvement > 0 else f"+{abs(stats.pollution_improvement) * 100:.0f}%"
- metrics_lines.append(f"├─ Average pollution: {pollution_pct} (improvement: {improvement_pct})")
+ metrics_lines.append(f"|- Average pollution: {pollution_pct} (improvement: {improvement_pct})")
if stats.placement_accuracy is not None:
accuracy_pct = f"{stats.placement_accuracy * 100:.1f}%"
- metrics_lines.append(f"├─ Placement accuracy: {accuracy_pct} (mathematical optimum)")
+ metrics_lines.append(f"|- Placement accuracy: {accuracy_pct} (mathematical optimum)")
if stats.generation_time_ms is not None:
- metrics_lines.append(f"└─ Generation time: {stats.generation_time_ms}ms")
+ metrics_lines.append(f"+- Generation time: {stats.generation_time_ms}ms")
else:
- # Change last ├─ to └─
+ # Change last |- to +-
if len(metrics_lines) > 1:
- metrics_lines[-1] = metrics_lines[-1].replace("├─", "└─")
+ metrics_lines[-1] = metrics_lines[-1].replace("|-", "+-")
for line in metrics_lines:
if self.use_color:
@@ -171,7 +171,7 @@ def _format_final_summary(self, results: CompilationResults) -> List[str]:
source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"
# Use proper tree formatting
- prefix = "├─" if summary != results.placement_summaries[-1] else "└─"
+ prefix = "|-" if summary != results.placement_summaries[-1] else "+-"
line = f"{prefix} {rel_path:<30} {content_text} from {source_text}"
if self.use_color:
@@ -220,7 +220,7 @@ def _format_project_discovery(self, analysis) -> List[str]:
# Constitution detection (first priority)
if analysis.constitution_detected:
- constitution_line = f"├─ Constitution detected: {analysis.constitution_path}"
+ constitution_line = f"|- Constitution detected: {analysis.constitution_path}"
if self.use_color:
lines.append(self._styled(constitution_line, "dim"))
else:
@@ -229,9 +229,9 @@ def _format_project_discovery(self, analysis) -> List[str]:
# Structure tree with more detailed information
file_types_summary = analysis.get_file_types_summary() if hasattr(analysis, 'get_file_types_summary') else "various"
tree_lines = [
- f"├─ {analysis.directories_scanned} directories scanned (max depth: {analysis.max_depth})",
- f"├─ {analysis.files_analyzed} files analyzed across {len(analysis.file_types_detected)} file types ({file_types_summary})",
- f"└─ {analysis.instruction_patterns_detected} instruction patterns detected"
+ f"|- {analysis.directories_scanned} directories scanned (max depth: {analysis.max_depth})",
+ f"|- {analysis.files_analyzed} files analyzed across {len(analysis.file_types_detected)} file types ({file_types_summary})",
+ f"+- {analysis.instruction_patterns_detected} instruction patterns detected"
]
for line in tree_lines:
@@ -313,7 +313,7 @@ def _format_optimization_progress(self, decisions: List[OptimizationDecision], a
# Fallback to simplified text display for non-Rich environments
# Add constitution first if detected
if analysis and analysis.constitution_detected:
- lines.append("** constitution.md ALL → ./AGENTS.md (rel: 100%)")
+ lines.append("** constitution.md ALL -> ./AGENTS.md (rel: 100%)")
for decision in decisions:
pattern_display = decision.pattern if decision.pattern else "(global)"
@@ -332,10 +332,10 @@ def _format_optimization_progress(self, decisions: List[OptimizationDecision], a
placement = self._get_relative_display_path(decision.placement_directories[0])
relevance = getattr(decision, 'relevance_score', 0.0) if hasattr(decision, 'relevance_score') else 1.0
pollution = getattr(decision, 'pollution_score', 0.0) if hasattr(decision, 'pollution_score') else 0.0
- line = f"{pattern_display:<25} {source_display:<15} {ratio_display:<10} → {placement:<25} (rel: {relevance*100:.0f}%)"
+ line = f"{pattern_display:<25} {source_display:<15} {ratio_display:<10} -> {placement:<25} (rel: {relevance*100:.0f}%)"
else:
placement_count = len(decision.placement_directories)
- line = f"{pattern_display:<25} {source_display:<15} {ratio_display:<10} → {placement_count} locations"
+ line = f"{pattern_display:<25} {source_display:<15} {ratio_display:<10} -> {placement_count} locations"
lines.append(line)
@@ -365,7 +365,7 @@ def _format_results_summary(self, results: CompilationResults) -> List[str]:
# Build metrics with baselines and improvements when available
metrics_lines = [
- f"┌─ Context efficiency: {efficiency_pct}"
+ f"+- Context efficiency: {efficiency_pct}"
]
if stats.efficiency_improvement is not None:
@@ -375,18 +375,18 @@ def _format_results_summary(self, results: CompilationResults) -> List[str]:
if stats.pollution_improvement is not None:
pollution_pct = f"{(1.0 - stats.pollution_improvement) * 100:.1f}%"
improvement_pct = f"-{stats.pollution_improvement * 100:.0f}%" if stats.pollution_improvement > 0 else f"+{abs(stats.pollution_improvement) * 100:.0f}%"
- metrics_lines.append(f"├─ Average pollution: {pollution_pct} (improvement: {improvement_pct})")
+ metrics_lines.append(f"|- Average pollution: {pollution_pct} (improvement: {improvement_pct})")
if stats.placement_accuracy is not None:
accuracy_pct = f"{stats.placement_accuracy * 100:.1f}%"
- metrics_lines.append(f"├─ Placement accuracy: {accuracy_pct} (mathematical optimum)")
+ metrics_lines.append(f"|- Placement accuracy: {accuracy_pct} (mathematical optimum)")
if stats.generation_time_ms is not None:
- metrics_lines.append(f"└─ Generation time: {stats.generation_time_ms}ms")
+ metrics_lines.append(f"+- Generation time: {stats.generation_time_ms}ms")
else:
- # Change last ├─ to └─
+ # Change last |- to +-
if len(metrics_lines) > 1:
- metrics_lines[-1] = metrics_lines[-1].replace("├─", "└─")
+ metrics_lines[-1] = metrics_lines[-1].replace("|-", "+-")
for line in metrics_lines:
if self.use_color:
@@ -408,7 +408,7 @@ def _format_results_summary(self, results: CompilationResults) -> List[str]:
source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"
# Use proper tree formatting
- prefix = "├─" if summary != results.placement_summaries[-1] else "└─"
+ prefix = "|-" if summary != results.placement_summaries[-1] else "+-"
line = f"{prefix} {rel_path:<30} {content_text} from {source_text}"
if self.use_color:
@@ -433,16 +433,16 @@ def _format_dry_run_summary(self, results: CompilationResults) -> List[str]:
instruction_text = f"{summary.instruction_count} instruction{'s' if summary.instruction_count != 1 else ''}"
source_text = f"{summary.source_count} source{'s' if summary.source_count != 1 else ''}"
- line = f"├─ {rel_path:<30} {instruction_text}, {source_text}"
+ line = f"|- {rel_path:<30} {instruction_text}, {source_text}"
if self.use_color:
lines.append(self._styled(line, "dim"))
else:
lines.append(line)
- # Change last ├─ to └─
+ # Change last |- to +-
if lines and len(lines) > 1:
- lines[-1] = lines[-1].replace("├─", "└─")
+ lines[-1] = lines[-1].replace("|-", "+-")
lines.append("")
@@ -490,19 +490,19 @@ def _format_mathematical_analysis(self, decisions: List[OptimizationDecision]) -
if score < 0.3:
dist_display = f"{score:.3f} (Low)"
strategy_name = "Single Point"
- coverage_status = "✅ Perfect"
+ coverage_status = "[+] Perfect"
elif score > 0.7:
dist_display = f"{score:.3f} (High)"
strategy_name = "Distributed"
- coverage_status = "✅ Universal"
+ coverage_status = "[+] Universal"
else:
dist_display = f"{score:.3f} (Medium)"
strategy_name = "Selective Multi"
# Check if root placement was used (indicates coverage fallback)
if any("." == str(p) or p.name == "" for p in decision.placement_directories):
- coverage_status = "⚠️ Root Fallback"
+ coverage_status = "[!] Root Fallback"
else:
- coverage_status = "✅ Verified"
+ coverage_status = "[+] Verified"
strategy_table.add_row(pattern, source_display, dist_display, strategy_name, coverage_status)
@@ -532,14 +532,14 @@ def _format_mathematical_analysis(self, decisions: List[OptimizationDecision]) -
# Analyze coverage outcome
if str(decision.placement_directories[0]).endswith('.'):
- coverage_result = "Root → All files inherit"
+ coverage_result = "Root -> All files inherit"
elif decision.distribution_score < 0.3:
- coverage_result = "Local → Perfect efficiency"
+ coverage_result = "Local -> Perfect efficiency"
else:
- coverage_result = "Selective → Coverage verified"
+ coverage_result = "Selective -> Coverage verified"
else:
placement = f"{len(decision.placement_directories)} locations"
- coverage_result = "Multi-point → Full coverage"
+ coverage_result = "Multi-point -> Full coverage"
coverage_table.add_row(pattern, matching_files, placement, coverage_result)
@@ -554,9 +554,9 @@ def _format_mathematical_analysis(self, decisions: List[OptimizationDecision]) -
lines.append("")
# Updated Mathematical Foundation Panel
- foundation_text = """Objective: minimize Σ(context_pollution × directory_weight)
-Constraints: ∀file_matching_pattern → can_inherit_instruction
-Variables: placement_matrix ∈ {0,1}
+ foundation_text = """Objective: minimize sum(context_pollution x directory_weight)
+Constraints: for_allfile_matching_pattern -> can_inherit_instruction
+Variables: placement_matrix in {0,1}
Algorithm: Three-tier strategy with hierarchical coverage verification
Coverage Guarantee: Every file can access applicable instructions through
@@ -584,13 +584,13 @@ def _format_mathematical_analysis(self, decisions: List[OptimizationDecision]) -
pattern = decision.pattern if decision.pattern else "(global)"
score = f"{decision.distribution_score:.3f}"
strategy = decision.strategy.value
- coverage = "✅ Verified" if decision.distribution_score < 0.7 else "⚠️ Root Fallback"
+ coverage = "[+] Verified" if decision.distribution_score < 0.7 else "[!] Root Fallback"
lines.append(f" {pattern:<30} {score:<8} {strategy:<15} {coverage}")
lines.append("")
lines.append("Mathematical Foundation:")
- lines.append(" Objective: minimize Σ(context_pollution × directory_weight)")
- lines.append(" Constraints: ∀file_matching_pattern → can_inherit_instruction")
+ lines.append(" Objective: minimize sum(context_pollution x directory_weight)")
+ lines.append(" Constraints: for_allfile_matching_pattern -> can_inherit_instruction")
lines.append(" Algorithm: Three-tier strategy with coverage verification")
lines.append(" Principle: Coverage guarantee takes priority over efficiency")
@@ -699,33 +699,33 @@ def _format_detailed_metrics(self, stats) -> List[str]:
# Add interpretation guide
if self.console:
try:
- interpretation_text = """📊 How These Metrics Are Calculated
+ interpretation_text = """How These Metrics Are Calculated
Context Efficiency = Average across all directories of (Relevant Instructions / Total Instructions)
-• For each directory, APM analyzes what instructions agents would inherit from AGENTS.md files
-• Calculates ratio of instructions that apply to files in that directory vs total instructions loaded
-• Takes weighted average across all project directories with files
+* For each directory, APM analyzes what instructions agents would inherit from AGENTS.md files
+* Calculates ratio of instructions that apply to files in that directory vs total instructions loaded
+* Takes weighted average across all project directories with files
Pollution Level = 100% - Context Efficiency (inverse relationship)
-• High pollution = agents load many irrelevant instructions when working in specific directories
-• Low pollution = agents see mostly relevant instructions for their current context
+* High pollution = agents load many irrelevant instructions when working in specific directories
+* Low pollution = agents see mostly relevant instructions for their current context
-🎯 Interpretation Benchmarks
+Interpretation Benchmarks
Context Efficiency:
-• 80-100%: Excellent - Instructions perfectly targeted to usage context
-• 60-80%: Good - Well-optimized with minimal wasted context
-• 40-60%: Fair - Some optimization opportunities exist
-• 20-40%: Poor - Significant context pollution, consider restructuring
-• 0-20%: Very Poor - High pollution, instructions poorly distributed
+* 80-100%: Excellent - Instructions perfectly targeted to usage context
+* 60-80%: Good - Well-optimized with minimal wasted context
+* 40-60%: Fair - Some optimization opportunities exist
+* 20-40%: Poor - Significant context pollution, consider restructuring
+* 0-20%: Very Poor - High pollution, instructions poorly distributed
Pollution Level:
-• 0-10%: Excellent - Agents see highly relevant instructions only
-• 10-25%: Good - Low noise, mostly relevant context
-• 25-50%: Fair - Moderate noise, some irrelevant instructions
-• 50%+: Poor - High noise, agents see many irrelevant instructions
+* 0-10%: Excellent - Agents see highly relevant instructions only
+* 10-25%: Good - Low noise, mostly relevant context
+* 25-50%: Fair - Moderate noise, some irrelevant instructions
+* 50%+: Poor - High noise, agents see many irrelevant instructions
-💡 Example: 36.7% efficiency means agents working in specific directories see only 36.7% relevant instructions and 63.3% irrelevant context pollution."""
+Example: 36.7% efficiency means agents working in specific directories see only 36.7% relevant instructions and 63.3% irrelevant context pollution."""
panel = Panel(interpretation_text, title="Metrics Guide", border_style="dim", title_align="left")
with self.console.capture() as capture:
@@ -737,8 +737,8 @@ def _format_detailed_metrics(self, stats) -> List[str]:
# Fallback to simple text
lines.extend([
"Metrics Guide:",
- "• Context Efficiency 80-100%: Excellent | 60-80%: Good | 40-60%: Fair | <40%: Poor",
- "• Pollution 0-10%: Excellent | 10-25%: Good | 25-50%: Fair | >50%: Poor"
+ "* Context Efficiency 80-100%: Excellent | 60-80%: Good | 40-60%: Fair | <40%: Poor",
+ "* Pollution 0-10%: Excellent | 10-25%: Good | 25-50%: Fair | >50%: Poor"
])
else:
# Fallback for non-Rich environments
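The two headline numbers come from one average; a sketch of the calculation the guide describes, with `dir_stats` assumed to map each directory to its (relevant, total) instruction counts:

def context_efficiency(dir_stats: dict) -> float:
    # Average of relevant/total across directories that load instructions;
    # pollution is simply 1.0 - efficiency.
    ratios = [rel / total for rel, total in dir_stats.values() if total]
    return sum(ratios) / len(ratios) if ratios else 1.0

# context_efficiency({"src": (3, 10), "docs": (4, 10)}) -> 0.35
# i.e. 35% efficiency, 65% pollution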
@@ -780,9 +780,9 @@ def _format_issues(self, warnings: List[str], errors: List[str]) -> List[str]:
# Errors first
for error in errors:
if self.use_color:
- lines.append(self._styled(f"✗ Error: {error}", "red"))
+ lines.append(self._styled(f"x Error: {error}", "red"))
else:
- lines.append(f"✗ Error: {error}")
+ lines.append(f"x Error: {error}")
# Then warnings - handle multi-line warnings as cohesive blocks
for warning in warnings:
@@ -791,9 +791,9 @@ def _format_issues(self, warnings: List[str], errors: List[str]) -> List[str]:
warning_lines = warning.split('\n')
# First line gets the warning symbol and styling
if self.use_color:
- lines.append(self._styled(f"⚠ Warning: {warning_lines[0]}", "yellow"))
+ lines.append(self._styled(f"[!] Warning: {warning_lines[0]}", "yellow"))
else:
- lines.append(f"⚠ Warning: {warning_lines[0]}")
+ lines.append(f"[!] Warning: {warning_lines[0]}")
# Subsequent lines are indented and styled consistently
for line in warning_lines[1:]:
@@ -805,20 +805,20 @@ def _format_issues(self, warnings: List[str], errors: List[str]) -> List[str]:
else:
# Single-line warning - standard format
if self.use_color:
- lines.append(self._styled(f"⚠ Warning: {warning}", "yellow"))
+ lines.append(self._styled(f"[!] Warning: {warning}", "yellow"))
else:
- lines.append(f"⚠ Warning: {warning}")
+ lines.append(f"[!] Warning: {warning}")
return lines
def _get_strategy_symbol(self, strategy: PlacementStrategy) -> str:
"""Get symbol for placement strategy."""
symbols = {
- PlacementStrategy.SINGLE_POINT: "●",
- PlacementStrategy.SELECTIVE_MULTI: "◆",
- PlacementStrategy.DISTRIBUTED: "◇"
+ PlacementStrategy.SINGLE_POINT: "*",
+ PlacementStrategy.SELECTIVE_MULTI: "#",
+ PlacementStrategy.DISTRIBUTED: "o"
}
- return symbols.get(strategy, "•")
+ return symbols.get(strategy, "*")
def _get_strategy_color(self, strategy: PlacementStrategy) -> str:
"""Get color for placement strategy."""
@@ -853,29 +853,29 @@ def _format_coverage_explanation(self, stats) -> List[str]:
efficiency = stats.efficiency_percentage
if efficiency < 30:
- lines.append("⚠️ Low Efficiency Detected:")
- lines.append(" • Coverage guarantee requires some instructions at root level")
- lines.append(" • This creates pollution for specialized directories")
- lines.append(" • Trade-off: Guaranteed coverage vs. optimal efficiency")
- lines.append(" • Alternative: Higher efficiency with coverage violations (data loss)")
+ lines.append("[!] Low Efficiency Detected:")
+ lines.append(" * Coverage guarantee requires some instructions at root level")
+ lines.append(" * This creates pollution for specialized directories")
+ lines.append(" * Trade-off: Guaranteed coverage vs. optimal efficiency")
+ lines.append(" * Alternative: Higher efficiency with coverage violations (data loss)")
lines.append("")
- lines.append("💡 This may be mathematically optimal given coverage constraints")
+ lines.append("This may be mathematically optimal given coverage constraints")
elif efficiency < 60:
- lines.append("✅ Moderate Efficiency:")
- lines.append(" • Good balance between coverage and efficiency")
- lines.append(" • Some coverage-driven pollution is acceptable")
- lines.append(" • Most patterns are well-localized")
+ lines.append("[+] Moderate Efficiency:")
+ lines.append(" * Good balance between coverage and efficiency")
+ lines.append(" * Some coverage-driven pollution is acceptable")
+ lines.append(" * Most patterns are well-localized")
else:
- lines.append("🎯 High Efficiency:")
- lines.append(" • Excellent pattern locality achieved")
- lines.append(" • Minimal coverage conflicts")
- lines.append(" • Instructions are optimally placed")
+ lines.append("High Efficiency:")
+ lines.append(" * Excellent pattern locality achieved")
+ lines.append(" * Minimal coverage conflicts")
+ lines.append(" * Instructions are optimally placed")
lines.append("")
- lines.append("📚 Why Coverage Takes Priority:")
- lines.append(" • Every file must access applicable instructions")
- lines.append(" • Hierarchical inheritance prevents data loss")
- lines.append(" • Better low efficiency than missing instructions")
+ lines.append("Why Coverage Takes Priority:")
+ lines.append(" * Every file must access applicable instructions")
+ lines.append(" * Hierarchical inheritance prevents data loss")
+ lines.append(" * Better low efficiency than missing instructions")
return lines
diff --git a/src/apm_cli/output/script_formatters.py b/src/apm_cli/output/script_formatters.py
index b2e4321dd..d73de659c 100644
--- a/src/apm_cli/output/script_formatters.py
+++ b/src/apm_cli/output/script_formatters.py
@@ -40,9 +40,9 @@ def format_script_header(self, script_name: str, params: Dict[str, str]) -> List
# Main header
if self.use_color:
- lines.append(self._styled(f"🚀 Running script: {script_name}", "cyan bold"))
+ lines.append(self._styled(f" Running script: {script_name}", "cyan bold"))
else:
- lines.append(f"🚀 Running script: {script_name}")
+ lines.append(f" Running script: {script_name}")
# Parameters tree if any exist
if params:
@@ -82,15 +82,15 @@ def format_compilation_progress(self, prompt_files: List[str]) -> List[str]:
# Show each file being compiled
for prompt_file in prompt_files:
- file_line = f"├─ {prompt_file}"
+ file_line = f"|- {prompt_file}"
if self.use_color:
lines.append(self._styled(file_line, "dim"))
else:
lines.append(file_line)
- # Change last ├─ to └─
+ # Change last |- to +-
if lines and len(lines) > 1:
- lines[-1] = lines[-1].replace("├─", "└─")
+ lines[-1] = lines[-1].replace("|-", "+-")
return lines
@@ -124,14 +124,14 @@ def format_runtime_execution(self, runtime: str, command: str, content_length: i
lines.append(f"Executing {runtime} runtime...")
# Command structure
- command_line = f"├─ Command: {command}"
+ command_line = f"|- Command: {command}"
if self.use_color:
lines.append(self._styled(command_line, "dim"))
else:
lines.append(command_line)
# Content size
- content_line = f"└─ Prompt content: {content_length:,} characters"
+ content_line = f"+- Prompt content: {content_length:,} characters"
if self.use_color:
lines.append(self._styled(content_line, "dim"))
else:
@@ -175,14 +175,14 @@ def format_content_preview(self, content: str, max_preview: int = 200) -> List[s
lines.extend(panel_output.split('\n'))
except:
# Fallback to simple formatting
- lines.append("─" * 50)
+ lines.append("-" * 50)
lines.append(content_preview)
- lines.append("─" * 50)
+ lines.append("-" * 50)
else:
# Simple text fallback
- lines.append("─" * 50)
+ lines.append("-" * 50)
lines.append(content_preview)
- lines.append("─" * 50)
+ lines.append("-" * 50)
return lines
@@ -207,15 +207,15 @@ def format_environment_setup(self, runtime: str, env_vars_set: List[str]) -> Lis
lines.append("Environment setup:")
for env_var in env_vars_set:
- env_line = f"├─ {env_var}: configured"
+ env_line = f"|- {env_var}: configured"
if self.use_color:
lines.append(self._styled(env_line, "dim"))
else:
lines.append(env_line)
- # Change last ├─ to └─
+ # Change last |- to +-
if lines and len(lines) > 1:
- lines[-1] = lines[-1].replace("├─", "└─")
+ lines[-1] = lines[-1].replace("|-", "+-")
return lines
@@ -231,7 +231,7 @@ def format_execution_success(self, runtime: str, execution_time: Optional[float]
"""
lines = []
- success_msg = f"✅ {runtime.title()} execution completed successfully"
+ success_msg = f"[+] {runtime.title()} execution completed successfully"
if execution_time is not None:
success_msg += f" ({execution_time:.2f}s)"
@@ -255,7 +255,7 @@ def format_execution_error(self, runtime: str, error_code: int, error_msg: Optio
"""
lines = []
- error_header = f"✗ {runtime.title()} execution failed (exit code: {error_code})"
+ error_header = f"x {runtime.title()} execution failed (exit code: {error_code})"
if self.use_color:
lines.append(self._styled(error_header, "red bold"))
else:
@@ -293,14 +293,14 @@ def format_subprocess_details(self, args: List[str], content_length: int) -> Lis
# Show command structure
args_display = " ".join(f'"{arg}"' if " " in arg else arg for arg in args)
- command_line = f"├─ Args: {args_display}"
+ command_line = f"|- Args: {args_display}"
if self.use_color:
lines.append(self._styled(command_line, "dim"))
else:
lines.append(command_line)
# Show content info
- content_line = f"└─ Content: +{content_length:,} chars appended"
+ content_line = f"+- Content: +{content_length:,} chars appended"
if self.use_color:
lines.append(self._styled(content_line, "dim"))
else:
@@ -322,7 +322,7 @@ def format_auto_discovery_message(self, script_name: str, prompt_file: Path, run
if self.use_color and RICH_AVAILABLE and self.console:
try:
text = Text()
- text.append("ℹ Auto-discovered: ", style="cyan")
+ text.append("[i] Auto-discovered: ", style="cyan")
text.append(str(prompt_file), style="bold white")
text.append(f" (runtime: {runtime})", style="dim")
@@ -331,9 +331,9 @@ def format_auto_discovery_message(self, script_name: str, prompt_file: Path, run
return capture.get().rstrip('\n')
except:
# Fallback to simple formatting
- return f"ℹ Auto-discovered: {prompt_file} (runtime: {runtime})"
+ return f"[i] Auto-discovered: {prompt_file} (runtime: {runtime})"
else:
- return f"ℹ Auto-discovered: {prompt_file} (runtime: {runtime})"
+ return f"[i] Auto-discovered: {prompt_file} (runtime: {runtime})"
def _styled(self, text: str, style: str) -> str:
"""Apply styling to text with rich fallback."""
diff --git a/src/apm_cli/primitives/models.py b/src/apm_cli/primitives/models.py
index 1c7b090d5..857d2ae12 100644
--- a/src/apm_cli/primitives/models.py
+++ b/src/apm_cli/primitives/models.py
@@ -148,14 +148,14 @@ def __init__(self):
self.contexts = []
self.skills = []
self.conflicts = []
- # Name→index maps for O(1) conflict lookups (see #171)
+ # Name->index maps for O(1) conflict lookups (see #171)
self._chatmode_index: Dict[str, int] = {}
self._instruction_index: Dict[str, int] = {}
self._context_index: Dict[str, int] = {}
self._skill_index: Dict[str, int] = {}
def _index_for(self, primitive_type: str) -> Dict[str, int]:
- """Return the name→index map for the given primitive type."""
+ """Return the name->index map for the given primitive type."""
if primitive_type == "chatmode":
return self._chatmode_index
elif primitive_type == "instruction":
diff --git a/src/apm_cli/registry/operations.py b/src/apm_cli/registry/operations.py
index cacd37756..c71095976 100644
--- a/src/apm_cli/registry/operations.py
+++ b/src/apm_cli/registry/operations.py
@@ -346,7 +346,7 @@ def _prompt_for_environment_variables(self, required_vars: Dict[str, Dict]) -> D
existing_value = os.getenv(var_name)
if existing_value:
- console.print(f" ✅ {var_name}: [dim]using existing value[/dim]")
+ console.print(f" [+] {var_name}: [dim]using existing value[/dim]")
env_vars[var_name] = existing_value
else:
# Determine if this looks like a password/secret
@@ -379,7 +379,7 @@ def _prompt_for_environment_variables(self, required_vars: Dict[str, Dict]) -> D
existing_value = os.getenv(var_name)
if existing_value:
- click.echo(f" ✅ {var_name}: using existing value")
+ click.echo(f" [+] {var_name}: using existing value")
env_vars[var_name] = existing_value
else:
prompt_text = f" {var_name}"
diff --git a/src/apm_cli/runtime/manager.py b/src/apm_cli/runtime/manager.py
index a737f2ed9..c3ebc3946 100644
--- a/src/apm_cli/runtime/manager.py
+++ b/src/apm_cli/runtime/manager.py
@@ -19,21 +19,26 @@
class RuntimeManager:
"""Manages AI runtime installation and configuration via embedded scripts."""
+ @property
+ def _is_windows(self) -> bool:
+ return sys.platform == "win32"
+
def __init__(self):
self.runtime_dir = Path.home() / ".apm" / "runtimes"
+ ext = ".ps1" if sys.platform == "win32" else ".sh"
self.supported_runtimes = {
"copilot": {
- "script": "setup-copilot.sh",
+ "script": f"setup-copilot{ext}",
"description": "GitHub Copilot CLI with native MCP integration",
"binary": "copilot"
},
"codex": {
- "script": "setup-codex.sh",
+ "script": f"setup-codex{ext}",
"description": "OpenAI Codex CLI with GitHub Models support",
"binary": "codex"
},
"llm": {
- "script": "setup-llm.sh",
+ "script": f"setup-llm{ext}",
"description": "Simon Willison's LLM library with multiple providers",
"binary": "llm"
}
@@ -60,15 +65,19 @@ def get_embedded_script(self, script_name: str) -> str:
raise FileNotFoundError(f"Script not found: {script_name}")
except Exception as e:
- click.echo(f"{Fore.RED}❌ Failed to load embedded script {script_name}: {e}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Failed to load embedded script {script_name}: {e}{Style.RESET_ALL}", err=True)
raise RuntimeError(f"Could not load setup script: {script_name}")
def get_common_script(self) -> str:
"""Get the common utilities script."""
- return self.get_embedded_script("setup-common.sh")
+ script_name = "setup-common.ps1" if self._is_windows else "setup-common.sh"
+ return self.get_embedded_script(script_name)
def get_token_helper_script(self) -> str:
"""Get the GitHub token helper script."""
+ if self._is_windows:
+ # On Windows, tokens are passed via environment variables directly
+ return ""
try:
# Try PyInstaller bundle first
if getattr(sys, 'frozen', False):
@@ -88,39 +97,58 @@ def get_token_helper_script(self) -> str:
raise FileNotFoundError("github-token-helper.sh not found")
except Exception as e:
- click.echo(f"{Fore.RED}❌ Failed to load github-token-helper.sh: {e}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Failed to load github-token-helper.sh: {e}{Style.RESET_ALL}", err=True)
raise RuntimeError("Could not load token helper script")
def run_embedded_script(self, script_content: str, common_content: str,
script_args: Optional[List[str]] = None) -> bool:
- """Execute an embedded bash script with common utilities."""
+ """Execute an embedded setup script with common utilities."""
script_args = script_args or []
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
- # Write common utilities
- common_script = temp_path / "setup-common.sh"
- common_script.write_text(common_content)
- common_script.chmod(0o755)
-
- # Write GitHub token helper
- try:
+ if self._is_windows:
+ # Write common utilities as PowerShell
+ common_script = temp_path / "setup-common.ps1"
+ common_script.write_text(common_content, encoding="utf-8")
+
+ # Write GitHub token helper (empty on Windows)
token_helper_content = self.get_token_helper_script()
- token_helper_script = temp_path / "github-token-helper.sh"
- token_helper_script.write_text(token_helper_content)
- token_helper_script.chmod(0o755)
- except Exception as e:
- click.echo(f"{Fore.YELLOW}⚠️ Token helper not available, scripts may use fallback authentication: {e}{Style.RESET_ALL}")
-
- # Write main script
- main_script = temp_path / "setup-script.sh"
- main_script.write_text(script_content)
- main_script.chmod(0o755)
+ if token_helper_content:
+ token_helper_script = temp_path / "github-token-helper.ps1"
+ token_helper_script.write_text(token_helper_content, encoding="utf-8")
+
+ # Write main script as PowerShell
+ main_script = temp_path / "setup-script.ps1"
+ main_script.write_text(script_content, encoding="utf-8")
+ else:
+ # Write common utilities as bash
+ common_script = temp_path / "setup-common.sh"
+ common_script.write_text(common_content)
+ common_script.chmod(0o755)
+
+ # Write GitHub token helper
+ try:
+ token_helper_content = self.get_token_helper_script()
+ token_helper_script = temp_path / "github-token-helper.sh"
+ token_helper_script.write_text(token_helper_content)
+ token_helper_script.chmod(0o755)
+ except Exception as e:
+ click.echo(f"{Fore.YELLOW}[!] Token helper not available, scripts may use fallback authentication: {e}{Style.RESET_ALL}")
+
+ # Write main script as bash
+ main_script = temp_path / "setup-script.sh"
+ main_script.write_text(script_content)
+ main_script.chmod(0o755)
# Execute script with environment that includes npm authentication
try:
- cmd = ["bash", str(main_script)] + script_args
+ if self._is_windows:
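+                # Prefer pwsh (PowerShell 7+) when on PATH, falling back to Windows PowerShell.
+                # -NoProfile and -ExecutionPolicy Bypass keep runs fast and deterministic on
+                # hosts with restrictive execution policies.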
+ ps_cmd = shutil.which("pwsh") or shutil.which("powershell") or "powershell"
+ cmd = [ps_cmd, "-NoProfile", "-ExecutionPolicy", "Bypass", "-File", str(main_script)] + script_args
+ else:
+ cmd = ["bash", str(main_script)] + script_args
# Prepare environment with GitHub tokens for all authentication needs
env = os.environ.copy()
@@ -142,51 +170,57 @@ def run_embedded_script(self, script_content: str, common_content: str,
)
return result.returncode == 0
except Exception as e:
- click.echo(f"{Fore.RED}❌ Failed to execute setup script: {e}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Failed to execute setup script: {e}{Style.RESET_ALL}", err=True)
return False
def setup_runtime(self, runtime_name: str, version: Optional[str] = None, vanilla: bool = False) -> bool:
"""Set up a specific runtime."""
if runtime_name not in self.supported_runtimes:
- click.echo(f"{Fore.RED}❌ Unsupported runtime: {runtime_name}{Style.RESET_ALL}", err=True)
- click.echo(f"{Fore.BLUE}ℹ️ Supported runtimes: {', '.join(self.supported_runtimes.keys())}{Style.RESET_ALL}")
+ click.echo(f"{Fore.RED}[x] Unsupported runtime: {runtime_name}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.BLUE}[i] Supported runtimes: {', '.join(self.supported_runtimes.keys())}{Style.RESET_ALL}")
return False
runtime_info = self.supported_runtimes[runtime_name]
script_name = runtime_info["script"]
description = runtime_info["description"]
- click.echo(f"{Fore.BLUE}🔧 Setting up {runtime_name} runtime: {description}{Style.RESET_ALL}")
+ click.echo(f"{Fore.BLUE} Setting up {runtime_name} runtime: {description}{Style.RESET_ALL}")
if vanilla:
- click.echo(f"{Fore.YELLOW}⚠️ Installing in vanilla mode - no APM configuration will be applied{Style.RESET_ALL}")
+ click.echo(f"{Fore.YELLOW}[!] Installing in vanilla mode - no APM configuration will be applied{Style.RESET_ALL}")
else:
- click.echo(f"{Fore.BLUE}ℹ️ Installing with APM defaults (GitHub Models for free access){Style.RESET_ALL}")
+ click.echo(f"{Fore.BLUE}[i] Installing with APM defaults (GitHub Models for free access){Style.RESET_ALL}")
try:
# Get scripts
script_content = self.get_embedded_script(script_name)
common_content = self.get_common_script()
- # Prepare arguments
+ # Prepare arguments (PowerShell scripts use named params like -Version/-Vanilla)
script_args = []
if version:
- script_args.append(version)
+ if self._is_windows:
+ script_args.extend(["-Version", version])
+ else:
+ script_args.append(version)
if vanilla:
- script_args.append("--vanilla")
+ if self._is_windows:
+ script_args.append("-Vanilla")
+ else:
+ script_args.append("--vanilla")
# Run setup script
success = self.run_embedded_script(script_content, common_content, script_args)
if success:
- click.echo(f"{Fore.GREEN}✅ Successfully set up {runtime_name} runtime{Style.RESET_ALL}")
+ click.echo(f"{Fore.GREEN}[+] Successfully set up {runtime_name} runtime{Style.RESET_ALL}")
return True
else:
- click.echo(f"{Fore.RED}❌ Failed to set up {runtime_name} runtime{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Failed to set up {runtime_name} runtime{Style.RESET_ALL}", err=True)
return False
except Exception as e:
- click.echo(f"{Fore.RED}❌ Error setting up {runtime_name}: {e}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Error setting up {runtime_name}: {e}{Style.RESET_ALL}", err=True)
return False
def list_runtimes(self) -> Dict[str, Dict[str, str]]:
@@ -250,7 +284,7 @@ def is_runtime_available(self, runtime_name: str) -> bool:
def remove_runtime(self, runtime_name: str) -> bool:
"""Remove an installed runtime."""
if runtime_name not in self.supported_runtimes:
- click.echo(f"{Fore.RED}❌ Unknown runtime: {runtime_name}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Unknown runtime: {runtime_name}{Style.RESET_ALL}", err=True)
return False
# Handle copilot runtime (npm-based, global install)
@@ -262,13 +296,13 @@ def remove_runtime(self, runtime_name: str) -> bool:
text=True
)
if result.returncode == 0:
- click.echo(f"{Fore.GREEN}✅ Successfully removed {runtime_name} runtime{Style.RESET_ALL}")
+ click.echo(f"{Fore.GREEN}[+] Successfully removed {runtime_name} runtime{Style.RESET_ALL}")
return True
else:
- click.echo(f"{Fore.RED}❌ Failed to remove {runtime_name}: {result.stderr}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Failed to remove {runtime_name}: {result.stderr}{Style.RESET_ALL}", err=True)
return False
except Exception as e:
- click.echo(f"{Fore.RED}❌ Failed to remove {runtime_name}: {e}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Failed to remove {runtime_name}: {e}{Style.RESET_ALL}", err=True)
return False
# Handle other runtimes (installed in APM runtime directory)
@@ -276,7 +310,7 @@ def remove_runtime(self, runtime_name: str) -> bool:
binary_path = self.runtime_dir / binary_name
if not binary_path.exists():
- click.echo(f"{Fore.YELLOW}⚠️ Runtime {runtime_name} is not installed in APM runtime directory{Style.RESET_ALL}")
+ click.echo(f"{Fore.YELLOW}[!] Runtime {runtime_name} is not installed in APM runtime directory{Style.RESET_ALL}")
return False
try:
@@ -291,11 +325,11 @@ def remove_runtime(self, runtime_name: str) -> bool:
if venv_path.exists():
shutil.rmtree(venv_path)
- click.echo(f"{Fore.GREEN}✅ Successfully removed {runtime_name} runtime{Style.RESET_ALL}")
+ click.echo(f"{Fore.GREEN}[+] Successfully removed {runtime_name} runtime{Style.RESET_ALL}")
return True
except Exception as e:
- click.echo(f"{Fore.RED}❌ Failed to remove {runtime_name}: {e}{Style.RESET_ALL}", err=True)
+ click.echo(f"{Fore.RED}[x] Failed to remove {runtime_name}: {e}{Style.RESET_ALL}", err=True)
return False
def get_runtime_preference(self) -> List[str]:
diff --git a/src/apm_cli/utils/console.py b/src/apm_cli/utils/console.py
index 492db1571..ea06b4ee9 100644
--- a/src/apm_cli/utils/console.py
+++ b/src/apm_cli/utils/console.py
@@ -30,28 +30,28 @@
Style = None
-# Status symbols for consistent iconography
+# Status symbols for consistent iconography (ASCII-safe for Windows cp1252)
STATUS_SYMBOLS = {
- 'success': '✨',
- 'sparkles': '✨',
- 'running': '🚀',
- 'gear': '⚙️',
- 'info': '💡',
- 'warning': '⚠️',
- 'error': '❌',
- 'check': '✅',
- 'cross': '❌',
- 'list': '📋',
- 'preview': '👀',
- 'robot': '🤖',
- 'metrics': '📊',
- 'default': '📍', # Default script marker
- 'eyes': '👀', # Watch mode
- 'folder': '📁', # Directory/folder operations
- 'cogs': '⚙️', # Compilation/processing
- 'plugin': '🔌', # Plugin-related operations
- 'search': '🔍', # Search operations
- 'download': '📥', # Download operations
+ 'success': '[*]',
+ 'sparkles': '[*]',
+ 'running': '[>]',
+ 'gear': '[*]',
+ 'info': '[i]',
+ 'warning': '[!]',
+ 'error': '[x]',
+ 'check': '[+]',
+ 'cross': '[x]',
+ 'list': '[#]',
+ 'preview': '[>]',
+ 'robot': '[>]',
+ 'metrics': '[#]',
+ 'default': '[>]', # Default script marker
+ 'eyes': '[>]', # Watch mode
+ 'folder': '[>]', # Directory/folder operations
+ 'cogs': '[*]', # Compilation/processing
+ 'plugin': '[>]', # Plugin-related operations
+ 'search': '[>]', # Search operations
+ 'download': '[>]', # Download operations
}
@@ -151,7 +151,7 @@ def _create_files_table(files_data: list, title: str = "Files") -> Optional[Any]
return None
try:
- table = Table(title=f"📋 {title}", show_header=True, header_style="bold cyan")
+ table = Table(title=title, show_header=True, header_style="bold cyan")
table.add_column("File", style="bold white")
table.add_column("Description", style="white")
@@ -180,13 +180,13 @@ def show_download_spinner(repo_name: str):
console = _get_console()
if console and RICH_AVAILABLE:
try:
- with console.status(f"[cyan]⬇️ Downloading {repo_name}...", spinner="dots") as status:
+ with console.status(f"[cyan]Downloading {repo_name}...", spinner="dots") as status:
yield status
except Exception:
# Fallback if Rich fails
- click.echo(f"⬇️ Downloading {repo_name}...")
+ click.echo(f"Downloading {repo_name}...")
yield None
else:
# Fallback for non-Rich environments
- click.echo(f"⬇️ Downloading {repo_name}...")
+ click.echo(f"Downloading {repo_name}...")
yield None
\ No newline at end of file
diff --git a/src/apm_cli/utils/github_host.py b/src/apm_cli/utils/github_host.py
index 7242d841e..0ae1990c6 100644
--- a/src/apm_cli/utils/github_host.py
+++ b/src/apm_cli/utils/github_host.py
@@ -100,10 +100,10 @@ def unsupported_host_error(hostname: str, context: Optional[str] = None) -> str:
msg += f"Invalid Git host: '{hostname}'.\n"
msg += "\n"
msg += "APM supports any valid FQDN as a Git host, including:\n"
- msg += " • github.com\n"
- msg += " • *.ghe.com (GitHub Enterprise Cloud)\n"
- msg += " • dev.azure.com, *.visualstudio.com (Azure DevOps)\n"
- msg += " • gitlab.com, bitbucket.org, or any self-hosted Git server\n"
+ msg += " * github.com\n"
+ msg += " * *.ghe.com (GitHub Enterprise Cloud)\n"
+ msg += " * dev.azure.com, *.visualstudio.com (Azure DevOps)\n"
+ msg += " * gitlab.com, bitbucket.org, or any self-hosted Git server\n"
msg += "\n"
if current_host:
diff --git a/tests/integration/test_compile_permission_denied.py b/tests/integration/test_compile_permission_denied.py
index 8c3c1ebf5..7bd6e31ff 100644
--- a/tests/integration/test_compile_permission_denied.py
+++ b/tests/integration/test_compile_permission_denied.py
@@ -7,9 +7,12 @@
from ..utils.constitution_fixtures import temp_project_with_constitution, DEFAULT_CONSTITUTION
+import pytest
+
CLI = [sys.executable, "-m", "apm_cli.cli", "compile", "--single-agents"]
+@pytest.mark.skipif(sys.platform == "win32", reason="Windows handles read-only directories differently")
def test_permission_denied_graceful(tmp_path: Path):
# Use temp project with constitution to force write
with temp_project_with_constitution(constitution_text=DEFAULT_CONSTITUTION) as proj:
diff --git a/tests/integration/test_multi_runtime_integration.py b/tests/integration/test_multi_runtime_integration.py
index 5ab775cd1..66ab2399a 100644
--- a/tests/integration/test_multi_runtime_integration.py
+++ b/tests/integration/test_multi_runtime_integration.py
@@ -94,7 +94,7 @@ def test_runtime_factory_integration():
# Should have at least LLM available
assert len(available) >= 1
- assert any(rt["name"] == "llm" for rt in available)
+ assert any(rt.get("name") == "llm" for rt in available)
# Test runtime existence checks
assert RuntimeFactory.runtime_exists("llm") is True
diff --git a/tests/integration/test_plugin_e2e.py b/tests/integration/test_plugin_e2e.py
index 1a86b07b6..c1d543394 100644
--- a/tests/integration/test_plugin_e2e.py
+++ b/tests/integration/test_plugin_e2e.py
@@ -9,6 +9,7 @@
import os
import shutil
import subprocess
+import sys
from datetime import datetime
from pathlib import Path
@@ -186,6 +187,7 @@ def test_empty_dir_rejected(self, tmp_path):
# ---- Test 4: Symlinks not followed ----------------------------------
+ @pytest.mark.skipif(sys.platform == "win32", reason="Symlinks require admin privileges on Windows")
def test_symlinks_not_followed(self, tmp_path):
"""Symlinks inside plugin dirs must NOT be dereferenced during copytree."""
plugin_dir = tmp_path / "symlink-plugin"
diff --git a/tests/integration/test_runtime_smoke.py b/tests/integration/test_runtime_smoke.py
index 4e171797b..144eb9cc8 100644
--- a/tests/integration/test_runtime_smoke.py
+++ b/tests/integration/test_runtime_smoke.py
@@ -7,6 +7,7 @@
import os
import subprocess
+import sys
import tempfile
import shutil
import pytest
@@ -63,6 +64,7 @@ def run_command(cmd, check=True, capture_output=True, timeout=60, cwd=None):
class TestRuntimeSmoke:
"""Smoke tests for APM runtime installation and basic functionality."""
+ @pytest.mark.skipif(sys.platform == "win32", reason="Bash scripts not available on Windows")
def test_codex_runtime_setup(self, temp_apm_home):
"""Test that Codex runtime setup script works correctly."""
# Get the project root (where scripts are located)
@@ -94,6 +96,7 @@ def test_codex_runtime_setup(self, temp_apm_home):
assert "github-models" in config_content, "GitHub Models config not found"
assert "gpt-4o" in config_content, "Default model not configured"
+ @pytest.mark.skipif(sys.platform == "win32", reason="Bash scripts not available on Windows")
def test_llm_runtime_setup(self, temp_apm_home):
"""Test that LLM runtime setup script works correctly."""
# Get the project root
@@ -168,7 +171,7 @@ def test_apm_runtime_detection(self, temp_apm_home):
runtime_dir = Path(temp_apm_home) / ".apm" / "runtimes"
if runtime_dir.exists():
original_path = os.environ.get('PATH', '')
- os.environ['PATH'] = f"{runtime_dir}:{original_path}"
+ os.environ['PATH'] = f"{runtime_dir}{os.pathsep}{original_path}"
try:
# Test runtime detection
diff --git a/tests/test_apm_package_models.py b/tests/test_apm_package_models.py
index ae5d61da6..4e9228aed 100644
--- a/tests/test_apm_package_models.py
+++ b/tests/test_apm_package_models.py
@@ -631,17 +631,17 @@ def test_summary(self):
"""Test validation summary messages."""
# Valid with no issues
result1 = ValidationResult()
- assert "✅ Package is valid" in result1.summary()
+ assert "[+] Package is valid" in result1.summary()
# Valid with warnings
result2 = ValidationResult()
result2.add_warning("Test warning")
- assert "⚠️ Package is valid with 1 warning(s)" in result2.summary()
+ assert "[!] Package is valid with 1 warning(s)" in result2.summary()
# Invalid with errors
result3 = ValidationResult()
result3.add_error("Test error")
- assert "❌ Package is invalid with 1 error(s)" in result3.summary()
+ assert "[x] Package is invalid with 1 error(s)" in result3.summary()
class TestPackageValidation:
diff --git a/tests/test_apm_resolver.py b/tests/test_apm_resolver.py
index aa55b2dd8..5b9a21981 100644
--- a/tests/test_apm_resolver.py
+++ b/tests/test_apm_resolver.py
@@ -327,7 +327,7 @@ def test_create_resolution_summary(self):
assert "test-package" in summary
assert "Total dependencies: 1" in summary
- assert "✅ Valid" in summary
+ assert "[+] Valid" in summary
def test_max_depth_limit(self):
"""Test that maximum depth limit is respected."""
diff --git a/tests/test_github_downloader.py b/tests/test_github_downloader.py
index 7b9556432..61f1bfc4f 100644
--- a/tests/test_github_downloader.py
+++ b/tests/test_github_downloader.py
@@ -259,12 +259,12 @@ def test_get_clone_progress_callback(self):
# Test with max_count
with patch('builtins.print') as mock_print:
callback(1, 50, 100, "Cloning")
- mock_print.assert_called_with("\r🚀 Cloning: 50% (50/100) Cloning", end='', flush=True)
+ mock_print.assert_called_with("\r Cloning: 50% (50/100) Cloning", end='', flush=True)
# Test without max_count
with patch('builtins.print') as mock_print:
callback(1, 25, None, "Receiving objects")
- mock_print.assert_called_with("\r🚀 Cloning: Receiving objects (25)", end='', flush=True)
+ mock_print.assert_called_with("\r Cloning: Receiving objects (25)", end='', flush=True)
class TestGitHubPackageDownloaderIntegration:
@@ -1126,5 +1126,23 @@ def fake_clone_with_fallback(url, path, progress_reporter=None, **kwargs):
assert cloned_paths[0].name == "repo_clone"
+class TestGitEnvironmentPlatformBehavior:
+ """Test platform-specific behavior in Git environment setup."""
+
+ def test_git_config_global_uses_nul_on_windows(self):
+ """GIT_CONFIG_GLOBAL should be 'NUL' on Windows."""
+ with patch.dict(os.environ, {'GITHUB_APM_PAT': 'tok'}, clear=True), \
+ patch('sys.platform', 'win32'):
+ dl = GitHubPackageDownloader()
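+            # 'NUL' is the Windows null device (counterpart of /dev/null); pointing
+            # GIT_CONFIG_GLOBAL at it isolates git from any user-level config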
+ assert dl.git_env['GIT_CONFIG_GLOBAL'] == 'NUL'
+
+ def test_git_config_global_uses_dev_null_on_unix(self):
+ """GIT_CONFIG_GLOBAL should be '/dev/null' on Unix."""
+ with patch.dict(os.environ, {'GITHUB_APM_PAT': 'tok'}, clear=True), \
+ patch('sys.platform', 'darwin'):
+ dl = GitHubPackageDownloader()
+ assert dl.git_env['GIT_CONFIG_GLOBAL'] == '/dev/null'
+
+
if __name__ == '__main__':
pytest.main([__file__])
\ No newline at end of file
diff --git a/tests/test_runnable_prompts.py b/tests/test_runnable_prompts.py
index 58685f343..693029c3e 100644
--- a/tests/test_runnable_prompts.py
+++ b/tests/test_runnable_prompts.py
@@ -65,7 +65,7 @@ def test_discover_prompt_file_local_apm_dir(self, tmp_path):
assert result is not None
assert result.name == "test.prompt.md"
- assert ".apm/prompts" in str(result)
+ assert ".apm/prompts" in str(result).replace("\\", "/")
def test_discover_prompt_file_github_dir(self, tmp_path):
"""Test discovery in .github/prompts/."""
@@ -81,7 +81,7 @@ def test_discover_prompt_file_github_dir(self, tmp_path):
assert result is not None
assert result.name == "test.prompt.md"
- assert ".github/prompts" in str(result)
+ assert ".github/prompts" in str(result).replace("\\", "/")
def test_discover_prompt_file_dependencies(self, tmp_path):
"""Test discovery in apm_modules/."""
diff --git a/tests/unit/integration/test_deployed_files_manifest.py b/tests/unit/integration/test_deployed_files_manifest.py
index e67522da6..9cd9ba1f4 100644
--- a/tests/unit/integration/test_deployed_files_manifest.py
+++ b/tests/unit/integration/test_deployed_files_manifest.py
@@ -244,7 +244,7 @@ def test_target_paths_only_includes_deployed(self, tmp_path: Path):
result = PromptIntegrator().integrate_package_prompts(
info, tmp_path, force=False, managed_files=managed
)
- rel_paths = [str(p.relative_to(tmp_path)) for p in result.target_paths]
+ rel_paths = [p.relative_to(tmp_path).as_posix() for p in result.target_paths]
assert ".github/prompts/b.prompt.md" in rel_paths
assert ".github/prompts/a.prompt.md" not in rel_paths
@@ -584,7 +584,7 @@ def test_skipped_files_excluded_from_target_paths(self, tmp_path: Path):
result = CommandIntegrator().integrate_package_commands(
info, tmp_path, force=False, managed_files=managed
)
- rel_paths = [str(p.relative_to(tmp_path)) for p in result.target_paths]
+ rel_paths = [p.relative_to(tmp_path).as_posix() for p in result.target_paths]
assert ".claude/commands/b.md" in rel_paths
assert ".claude/commands/a.md" not in rel_paths
diff --git a/tests/unit/test_ado_path_structure.py b/tests/unit/test_ado_path_structure.py
index 87b4d15a8..8636719d5 100644
--- a/tests/unit/test_ado_path_structure.py
+++ b/tests/unit/test_ado_path_structure.py
@@ -559,9 +559,9 @@ def test_prune_joinpath_works_for_variable_depth(self):
# GitHub 2-level
github_parts = ["owner", "repo"]
github_path = base.joinpath(*github_parts)
- assert str(github_path) == "/tmp/apm_modules/owner/repo"
+ assert github_path.as_posix().endswith("/tmp/apm_modules/owner/repo")
# ADO 3-level
ado_parts = ["org", "project", "repo"]
ado_path = base.joinpath(*ado_parts)
- assert str(ado_path) == "/tmp/apm_modules/org/project/repo"
\ No newline at end of file
+ assert ado_path.as_posix().endswith("/tmp/apm_modules/org/project/repo")
\ No newline at end of file
diff --git a/tests/unit/test_auth_scoping.py b/tests/unit/test_auth_scoping.py
index d4ae41fe4..3a4eaee5f 100644
--- a/tests/unit/test_auth_scoping.py
+++ b/tests/unit/test_auth_scoping.py
@@ -7,6 +7,7 @@
"""
import os
+import sys
import tempfile
from pathlib import Path
from unittest.mock import Mock, patch, MagicMock
@@ -171,7 +172,8 @@ def test_github_host_env_is_locked_down(self):
env_used = calls[0][1].get("env", calls[0].kwargs.get("env"))
assert env_used.get("GIT_ASKPASS") == "echo"
assert env_used.get("GIT_CONFIG_NOSYSTEM") == "1"
- assert env_used.get("GIT_CONFIG_GLOBAL") == "/dev/null"
+ expected_null = "NUL" if sys.platform == "win32" else "/dev/null"
+ assert env_used.get("GIT_CONFIG_GLOBAL") == expected_null
def test_github_host_no_token_allows_credential_helpers(self):
"""For GitHub hosts WITHOUT a token, env is relaxed so credential helpers work."""
diff --git a/tests/unit/test_copilot_runtime.py b/tests/unit/test_copilot_runtime.py
index bd7b4028e..3f80268cb 100644
--- a/tests/unit/test_copilot_runtime.py
+++ b/tests/unit/test_copilot_runtime.py
@@ -71,7 +71,7 @@ def test_get_mcp_config_path(self):
runtime = CopilotRuntime()
config_path = runtime.get_mcp_config_path()
- assert str(config_path).endswith(".copilot/mcp-config.json")
+ assert config_path.as_posix().endswith(".copilot/mcp-config.json")
def test_execute_prompt_basic(self):
"""Test basic prompt execution."""
diff --git a/tests/unit/test_init_command.py b/tests/unit/test_init_command.py
index 76ff3a7eb..9ead69f6c 100644
--- a/tests/unit/test_init_command.py
+++ b/tests/unit/test_init_command.py
@@ -38,223 +38,259 @@ def test_init_current_directory(self):
"""Test initialization in current directory (minimal mode)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
+ try:
- result = self.runner.invoke(cli, ["init", "--yes"])
+ result = self.runner.invoke(cli, ["init", "--yes"])
- assert result.exit_code == 0
- assert "APM project initialized successfully!" in result.output
- assert Path("apm.yml").exists()
- # Minimal mode: no template files created
- assert not Path("hello-world.prompt.md").exists()
- assert not Path("README.md").exists()
- assert not Path(".apm").exists()
+ assert result.exit_code == 0
+ assert "APM project initialized successfully!" in result.output
+ assert Path("apm.yml").exists()
+ # Minimal mode: no template files created
+ assert not Path("hello-world.prompt.md").exists()
+ assert not Path("README.md").exists()
+ assert not Path(".apm").exists()
+ finally:
+                os.chdir(self.original_dir)  # restore CWD before TemporaryDirectory cleanup; Windows can't delete the current working directory
def test_init_explicit_current_directory(self):
"""Test initialization with explicit '.' argument (minimal mode)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
+ try:
- result = self.runner.invoke(cli, ["init", ".", "--yes"])
+ result = self.runner.invoke(cli, ["init", ".", "--yes"])
- assert result.exit_code == 0
- assert "APM project initialized successfully!" in result.output
- assert Path("apm.yml").exists()
- # Minimal mode: no template files created
- assert not Path("hello-world.prompt.md").exists()
+ assert result.exit_code == 0
+ assert "APM project initialized successfully!" in result.output
+ assert Path("apm.yml").exists()
+ # Minimal mode: no template files created
+ assert not Path("hello-world.prompt.md").exists()
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_new_directory(self):
"""Test initialization in new directory (minimal mode)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
-
- result = self.runner.invoke(cli, ["init", "my-project", "--yes"])
-
- assert result.exit_code == 0
- assert "Created project directory: my-project" in result.output
- # Use absolute path to check files
- project_path = Path(tmp_dir) / "my-project"
- assert project_path.exists()
- assert project_path.is_dir()
- assert (project_path / "apm.yml").exists()
- # Minimal mode: no template files created
- assert not (project_path / "hello-world.prompt.md").exists()
- assert not (project_path / "README.md").exists()
- assert not (project_path / ".apm").exists()
+ try:
+
+ result = self.runner.invoke(cli, ["init", "my-project", "--yes"])
+
+ assert result.exit_code == 0
+ assert "Created project directory: my-project" in result.output
+ # Use absolute path to check files
+ project_path = Path(tmp_dir) / "my-project"
+ assert project_path.exists()
+ assert project_path.is_dir()
+ assert (project_path / "apm.yml").exists()
+ # Minimal mode: no template files created
+ assert not (project_path / "hello-world.prompt.md").exists()
+ assert not (project_path / "README.md").exists()
+ assert not (project_path / ".apm").exists()
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_existing_project_without_force(self):
"""Test initialization over existing apm.yml without --force (removed flag)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
+ try:
- # Create existing apm.yml
- Path("apm.yml").write_text("name: existing-project\nversion: 0.1.0\n")
+ # Create existing apm.yml
+ Path("apm.yml").write_text("name: existing-project\nversion: 0.1.0\n")
- # Try to init without interactive confirmation (should prompt)
- result = self.runner.invoke(cli, ["init", "--yes"])
+                # Init with --yes over an existing apm.yml (overwrites without prompting)
+ result = self.runner.invoke(cli, ["init", "--yes"])
- assert result.exit_code == 0
- assert "apm.yml already exists" in result.output
- assert "--yes specified, overwriting apm.yml..." in result.output
+ assert result.exit_code == 0
+ assert "apm.yml already exists" in result.output
+ assert "--yes specified, overwriting apm.yml..." in result.output
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_existing_project_with_force(self):
"""Test initialization over existing apm.yml (--force flag removed, behavior same as --yes)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
-
- # Create existing apm.yml
- Path("apm.yml").write_text("name: existing-project\nversion: 0.1.0\n")
-
- result = self.runner.invoke(cli, ["init", "--yes"])
-
- assert result.exit_code == 0
- assert "APM project initialized successfully!" in result.output
- # Should overwrite the file with minimal structure
- with open("apm.yml") as f:
- config = yaml.safe_load(f)
- # Minimal structure
- assert "dependencies" in config
- assert config["dependencies"] == {"apm": [], "mcp": []}
- assert "scripts" in config
- assert config["scripts"] == {}
+ try:
+
+ # Create existing apm.yml
+ Path("apm.yml").write_text("name: existing-project\nversion: 0.1.0\n")
+
+ result = self.runner.invoke(cli, ["init", "--yes"])
+
+ assert result.exit_code == 0
+ assert "APM project initialized successfully!" in result.output
+ # Should overwrite the file with minimal structure
+ with open("apm.yml") as f:
+ config = yaml.safe_load(f)
+ # Minimal structure
+ assert "dependencies" in config
+ assert config["dependencies"] == {"apm": [], "mcp": []}
+ assert "scripts" in config
+ assert config["scripts"] == {}
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_preserves_existing_config(self):
"""Test that init with --yes overwrites existing apm.yml (no merge in minimal mode)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
-
- # Create existing apm.yml with custom values
- existing_config = {
- "name": "my-custom-project",
- "version": "2.0.0",
- "description": "Custom description",
- "author": "Custom Author",
- }
- with open("apm.yml", "w") as f:
- yaml.dump(existing_config, f)
-
- result = self.runner.invoke(cli, ["init", "--yes"])
-
- assert result.exit_code == 0
- # Minimal mode: overwrites with auto-detected values
- assert "apm.yml already exists" in result.output
+ try:
+
+ # Create existing apm.yml with custom values
+ existing_config = {
+ "name": "my-custom-project",
+ "version": "2.0.0",
+ "description": "Custom description",
+ "author": "Custom Author",
+ }
+ with open("apm.yml", "w") as f:
+ yaml.dump(existing_config, f)
+
+ result = self.runner.invoke(cli, ["init", "--yes"])
+
+ assert result.exit_code == 0
+ # Minimal mode: overwrites with auto-detected values
+ assert "apm.yml already exists" in result.output
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_interactive_mode(self):
"""Test interactive mode with user input."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
-
- # Simulate user input
- user_input = "my-test-project\n1.5.0\nTest description\nTest Author\ny\n"
-
- result = self.runner.invoke(cli, ["init"], input=user_input)
-
- assert result.exit_code == 0
- assert "Setting up your APM project" in result.output
- assert "Project name" in result.output
- assert "Version" in result.output
- assert "Description" in result.output
- assert "Author" in result.output
-
- # Verify the interactive values were applied to apm.yml
- with open("apm.yml") as f:
- config = yaml.safe_load(f)
- assert config["name"] == "my-test-project"
- assert config["version"] == "1.5.0"
- assert config["description"] == "Test description"
- assert config["author"] == "Test Author"
+ try:
+
+ # Simulate user input
+ user_input = "my-test-project\n1.5.0\nTest description\nTest Author\ny\n"
+
+ result = self.runner.invoke(cli, ["init"], input=user_input)
+
+ assert result.exit_code == 0
+ assert "Setting up your APM project" in result.output
+ assert "Project name" in result.output
+ assert "Version" in result.output
+ assert "Description" in result.output
+ assert "Author" in result.output
+
+ # Verify the interactive values were applied to apm.yml
+ with open("apm.yml") as f:
+ config = yaml.safe_load(f)
+ assert config["name"] == "my-test-project"
+ assert config["version"] == "1.5.0"
+ assert config["description"] == "Test description"
+ assert config["author"] == "Test Author"
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_interactive_mode_abort(self):
"""Test aborting interactive mode."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
+ try:
- # Simulate user input with 'no' to confirmation
- user_input = "my-test-project\n1.5.0\nTest description\nTest Author\nn\n"
+ # Simulate user input with 'no' to confirmation
+ user_input = "my-test-project\n1.5.0\nTest description\nTest Author\nn\n"
- result = self.runner.invoke(cli, ["init"], input=user_input)
+ result = self.runner.invoke(cli, ["init"], input=user_input)
- assert result.exit_code == 0
- assert "Aborted" in result.output
- assert not Path("apm.yml").exists()
+ assert result.exit_code == 0
+ assert "Aborted" in result.output
+ assert not Path("apm.yml").exists()
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_existing_project_interactive_cancel(self):
"""Test cancelling when existing apm.yml detected in interactive mode."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
+ try:
- # Create existing apm.yml
- Path("apm.yml").write_text("name: existing-project\nversion: 0.1.0\n")
+ # Create existing apm.yml
+ Path("apm.yml").write_text("name: existing-project\nversion: 0.1.0\n")
- # Simulate user saying 'no' to overwrite
- result = self.runner.invoke(cli, ["init"], input="n\n")
+ # Simulate user saying 'no' to overwrite
+ result = self.runner.invoke(cli, ["init"], input="n\n")
- assert result.exit_code == 0
- assert "apm.yml already exists" in result.output
- assert "Initialization cancelled" in result.output
+ assert result.exit_code == 0
+ assert "apm.yml already exists" in result.output
+ assert "Initialization cancelled" in result.output
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_validates_project_structure(self):
"""Test that init creates minimal project structure."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
+ try:
- result = self.runner.invoke(cli, ["init", "test-project", "--yes"])
+ result = self.runner.invoke(cli, ["init", "test-project", "--yes"])
- assert result.exit_code == 0
+ assert result.exit_code == 0
- # Use absolute path for checking files
- project_path = Path(tmp_dir) / "test-project"
+ # Use absolute path for checking files
+ project_path = Path(tmp_dir) / "test-project"
- # Verify apm.yml minimal structure
- with open(project_path / "apm.yml") as f:
- config = yaml.safe_load(f)
- assert config["name"] == "test-project"
- assert "version" in config
- assert "dependencies" in config
- assert config["dependencies"] == {"apm": [], "mcp": []}
- assert "scripts" in config
- assert config["scripts"] == {}
+ # Verify apm.yml minimal structure
+ with open(project_path / "apm.yml") as f:
+ config = yaml.safe_load(f)
+ assert config["name"] == "test-project"
+ assert "version" in config
+ assert "dependencies" in config
+ assert config["dependencies"] == {"apm": [], "mcp": []}
+ assert "scripts" in config
+ assert config["scripts"] == {}
- # Minimal mode: no template files created
- assert not (project_path / "hello-world.prompt.md").exists()
- assert not (project_path / "README.md").exists()
- assert not (project_path / ".apm").exists()
+ # Minimal mode: no template files created
+ assert not (project_path / "hello-world.prompt.md").exists()
+ assert not (project_path / "README.md").exists()
+ assert not (project_path / ".apm").exists()
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_auto_detection(self):
"""Test auto-detection of project metadata."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
+ try:
- # Initialize git repo and set author
- import subprocess
+ # Initialize git repo and set author
+ import subprocess
- git_init = subprocess.run(["git", "init"], capture_output=True)
- assert git_init.returncode == 0, f"git init failed: {git_init.stderr}"
+ git_init = subprocess.run(["git", "init"], capture_output=True)
+ assert git_init.returncode == 0, f"git init failed: {git_init.stderr}"
- git_config = subprocess.run(
- ["git", "config", "user.name", "Test User"], capture_output=True
- )
- assert (
- git_config.returncode == 0
- ), f"git config failed: {git_config.stderr}"
+ git_config = subprocess.run(
+ ["git", "config", "user.name", "Test User"], capture_output=True
+ )
+ assert (
+ git_config.returncode == 0
+ ), f"git config failed: {git_config.stderr}"
- result = self.runner.invoke(cli, ["init", "--yes"])
+ result = self.runner.invoke(cli, ["init", "--yes"])
- assert result.exit_code == 0
+ assert result.exit_code == 0
- with open("apm.yml") as f:
- config = yaml.safe_load(f)
- # Should auto-detect author from git
- assert config["author"] == "Test User"
- # Should auto-detect description
- assert "APM project" in config["description"]
+ with open("apm.yml") as f:
+ config = yaml.safe_load(f)
+ # Should auto-detect author from git
+ assert config["author"] == "Test User"
+ # Should auto-detect description
+ assert "APM project" in config["description"]
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
def test_init_does_not_create_skill_md(self):
"""Test that init does not create SKILL.md (only apm.yml)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
+ try:
- result = self.runner.invoke(cli, ["init", "--yes"])
+ result = self.runner.invoke(cli, ["init", "--yes"])
- assert result.exit_code == 0
- assert Path("apm.yml").exists()
- assert not Path("SKILL.md").exists()
+ assert result.exit_code == 0
+ assert Path("apm.yml").exists()
+ assert not Path("SKILL.md").exists()
+ finally:
+ os.chdir(self.original_dir) # restore CWD before TemporaryDirectory cleanup
diff --git a/tests/unit/test_mcp_client_factory.py b/tests/unit/test_mcp_client_factory.py
index 9dc8514eb..f5ed47b60 100644
--- a/tests/unit/test_mcp_client_factory.py
+++ b/tests/unit/test_mcp_client_factory.py
@@ -144,7 +144,7 @@ def test_configure_mcp_server_remote_rejected(self, mock_find_server):
mock_find_server.assert_called_once_with("remote-server")
# Verify warning message was printed
- mock_print.assert_any_call("⚠️ Warning: MCP server 'remote-server' is a remote server (SSE type)")
+ mock_print.assert_any_call("[!] Warning: MCP server 'remote-server' is a remote server (SSE type)")
mock_print.assert_any_call(" Codex CLI only supports local servers with command/args configuration")
# Verify no config was updated
diff --git a/tests/unit/test_runtime_factory.py b/tests/unit/test_runtime_factory.py
index dc0a55241..244f0d754 100644
--- a/tests/unit/test_runtime_factory.py
+++ b/tests/unit/test_runtime_factory.py
@@ -14,8 +14,8 @@ def test_get_available_runtimes_real_system(self):
# At least LLM should be available since it's installed
assert len(available) >= 1
- assert any(rt["name"] == "llm" for rt in available)
- assert all(rt["available"] for rt in available)
+ assert any(rt.get("name") == "llm" for rt in available)
+ assert all(rt.get("available") for rt in available)
def test_get_runtime_by_name_llm_real(self):
"""Test getting LLM runtime by name (real system)."""
diff --git a/tests/unit/test_runtime_windows.py b/tests/unit/test_runtime_windows.py
new file mode 100644
index 000000000..c9e5576de
--- /dev/null
+++ b/tests/unit/test_runtime_windows.py
@@ -0,0 +1,252 @@
+"""Tests for Windows platform support in RuntimeManager and ScriptRunner."""
+
+import sys
+from unittest.mock import patch, MagicMock
+import pytest
+
+# Import modules at module level BEFORE any sys.platform patching,
+# to avoid triggering Windows-only import paths (msvcrt, CREATE_NO_WINDOW) on Unix.
+from apm_cli.runtime.manager import RuntimeManager
+from apm_cli.core.script_runner import ScriptRunner
+
+
+def _make_manager(platform: str) -> RuntimeManager:
+ """Create a RuntimeManager with a specific platform."""
+ with patch("sys.platform", platform):
+ return RuntimeManager()
+
+
+class TestRuntimeManagerPlatformDetection:
+ """Test RuntimeManager selects correct scripts per platform."""
+
+ def test_selects_ps1_scripts_on_windows(self):
+ manager = _make_manager("win32")
+ for name, runtime_info in manager.supported_runtimes.items():
+ assert runtime_info["script"].endswith(".ps1"), (
+ f"Runtime '{name}' should use .ps1 on Windows, got {runtime_info['script']}"
+ )
+
+ def test_selects_sh_scripts_on_unix(self):
+ manager = _make_manager("darwin")
+ for name, runtime_info in manager.supported_runtimes.items():
+ assert runtime_info["script"].endswith(".sh"), (
+ f"Runtime '{name}' should use .sh on Unix, got {runtime_info['script']}"
+ )
+
+ def test_selects_sh_scripts_on_linux(self):
+ manager = _make_manager("linux")
+ for name, runtime_info in manager.supported_runtimes.items():
+ assert runtime_info["script"].endswith(".sh"), (
+ f"Runtime '{name}' should use .sh on Linux, got {runtime_info['script']}"
+ )
+
+ def test_common_script_is_ps1_on_windows(self):
+ manager = _make_manager("win32")
+ with patch("sys.platform", "win32"), \
+ patch.object(manager, "get_embedded_script", return_value="# ps1 content") as mock:
+ manager.get_common_script()
+ mock.assert_called_once_with("setup-common.ps1")
+
+ def test_common_script_is_sh_on_unix(self):
+ manager = _make_manager("darwin")
+ with patch("sys.platform", "darwin"), \
+ patch.object(manager, "get_embedded_script", return_value="# sh content") as mock:
+ manager.get_common_script()
+ mock.assert_called_once_with("setup-common.sh")
+
+
+class TestRuntimeManagerTokenHelper:
+ """Test token helper script platform behavior."""
+
+ def test_token_helper_returns_empty_on_windows(self):
+ manager = _make_manager("win32")
+ with patch("sys.platform", "win32"):
+ result = manager.get_token_helper_script()
+ assert result == "", "Token helper should return empty string on Windows"
+
+ def test_token_helper_loads_script_on_unix(self):
+ manager = _make_manager("darwin")
+ with patch("sys.platform", "darwin"), \
+ patch("pathlib.Path.exists", return_value=True), \
+ patch("pathlib.Path.read_text", return_value="#!/bin/bash\n# token helper"):
+ result = manager.get_token_helper_script()
+ assert result == "#!/bin/bash\n# token helper"
+
+
+class TestRuntimeManagerExecution:
+ """Test RuntimeManager uses correct shell per platform."""
+
+ def test_uses_powershell_on_windows(self):
+ """Verify PowerShell is used for script execution on Windows."""
+ manager = _make_manager("win32")
+ with patch("sys.platform", "win32"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run, \
+ patch("shutil.which", return_value=r"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe"), \
+ patch.object(manager, "get_token_helper_script", return_value=""):
+ manager.run_embedded_script("# script", "# common")
+
+ cmd = mock_run.call_args[0][0]
+ assert "powershell" in cmd[0].lower() or "pwsh" in cmd[0].lower(), (
+ f"Expected powershell/pwsh in command, got: {cmd[0]}"
+ )
+
+ def test_powershell_uses_bypass_execution_policy(self):
+ """Verify -ExecutionPolicy Bypass is passed on Windows."""
+ manager = _make_manager("win32")
+ with patch("sys.platform", "win32"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run, \
+ patch("shutil.which", return_value=r"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe"), \
+ patch.object(manager, "get_token_helper_script", return_value=""):
+ manager.run_embedded_script("# script", "# common")
+
+ cmd = mock_run.call_args[0][0]
+ assert "-ExecutionPolicy" in cmd
+ assert "Bypass" in cmd
+
+ def test_windows_writes_ps1_temp_files(self):
+ """Verify temp files use .ps1 extension on Windows."""
+ manager = _make_manager("win32")
+ with patch("sys.platform", "win32"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run, \
+ patch("shutil.which", return_value=r"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe"), \
+ patch.object(manager, "get_token_helper_script", return_value=""):
+ manager.run_embedded_script("# script content", "# common content")
+
+ cmd = mock_run.call_args[0][0]
+ file_arg_idx = cmd.index("-File") + 1
+ assert cmd[file_arg_idx].endswith(".ps1"), (
+ f"Expected .ps1 temp file, got: {cmd[file_arg_idx]}"
+ )
+
+ def test_uses_bash_on_unix(self):
+ """Verify bash is used for script execution on Unix."""
+ manager = _make_manager("linux")
+ with patch("sys.platform", "linux"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run, \
+ patch("pathlib.Path.exists", return_value=True), \
+ patch("pathlib.Path.read_text", return_value="#!/bin/bash\n# token helper"):
+ manager.run_embedded_script("# script", "# common")
+
+ cmd = mock_run.call_args[0][0]
+ assert cmd[0] == "bash", f"Expected bash, got: {cmd[0]}"
+
+ def test_unix_writes_sh_temp_files(self):
+ """Verify temp files use .sh extension on Unix."""
+ manager = _make_manager("linux")
+ with patch("sys.platform", "linux"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run, \
+ patch("pathlib.Path.exists", return_value=True), \
+ patch("pathlib.Path.read_text", return_value="#!/bin/bash"):
+ manager.run_embedded_script("# script content", "# common content")
+
+ cmd = mock_run.call_args[0][0]
+ assert cmd[1].endswith(".sh"), f"Expected .sh temp file, got: {cmd[1]}"
+
+ def test_script_args_forwarded_on_windows(self):
+ """Verify script arguments are forwarded to PowerShell."""
+ manager = _make_manager("win32")
+ with patch("sys.platform", "win32"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run, \
+ patch("shutil.which", return_value=r"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe"), \
+ patch.object(manager, "get_token_helper_script", return_value=""):
+ manager.run_embedded_script("# script", "# common", ["-Vanilla"])
+
+ cmd = mock_run.call_args[0][0]
+ assert "-Vanilla" in cmd
+
+ def test_setup_runtime_uses_ps_args_on_windows(self):
+ """Verify setup_runtime translates args to PowerShell style on Windows."""
+ manager = _make_manager("win32")
+ with patch("sys.platform", "win32"), \
+ patch.object(manager, "get_embedded_script", return_value="# ps1"), \
+ patch.object(manager, "get_common_script", return_value="# common"), \
+ patch.object(manager, "run_embedded_script", return_value=True) as mock_run:
+ manager.setup_runtime("codex", version="0.1.0", vanilla=True)
+
+ args = mock_run.call_args[0][2] # script_args is the 3rd positional arg
+ assert "-Version" in args
+ assert "0.1.0" in args
+ assert "-Vanilla" in args
+ assert "--vanilla" not in args
+
+ def test_setup_runtime_uses_unix_args_on_linux(self):
+ """Verify setup_runtime keeps Unix-style args on Linux."""
+ manager = _make_manager("linux")
+ with patch("sys.platform", "linux"), \
+ patch.object(manager, "get_embedded_script", return_value="# bash"), \
+ patch.object(manager, "get_common_script", return_value="# common"), \
+ patch.object(manager, "run_embedded_script", return_value=True) as mock_run:
+ manager.setup_runtime("codex", version="0.1.0", vanilla=True)
+
+ args = mock_run.call_args[0][2]
+ assert "0.1.0" in args
+ assert "--vanilla" in args
+ assert "-Vanilla" not in args
+
+
+class TestScriptRunnerWindowsParsing:
+ """Test ScriptRunner handles Windows command parsing."""
+
+ def test_execute_runtime_command_uses_shlex_on_windows(self):
+ """On Windows, _execute_runtime_command should use shlex.split(posix=False)."""
+ runner = ScriptRunner()
+ env = {"PATH": "/usr/bin"}
+
+ with patch("sys.platform", "win32"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run:
+ runner._execute_runtime_command("codex --quiet", "prompt content", env)
+ call_args = mock_run.call_args[0][0]
+ assert "codex" in call_args
+ assert "--quiet" in call_args
+
+ def test_execute_runtime_command_preserves_quotes_on_windows(self):
+ """On Windows, quoted arguments should be preserved by shlex.split(posix=False)."""
+ runner = ScriptRunner()
+ env = {"PATH": "/usr/bin"}
+
+ with patch("sys.platform", "win32"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run:
+ runner._execute_runtime_command(
+ 'codex --model "gpt-4o mini"', "prompt content", env
+ )
+ call_args = mock_run.call_args[0][0]
+ assert "codex" in call_args
+ # shlex.split(posix=False) keeps the quotes around the value
+ assert any("gpt-4o mini" in arg or '"gpt-4o mini"' in arg for arg in call_args)
+
+ def test_execute_runtime_command_uses_shlex_on_unix(self):
+ """On Unix, _execute_runtime_command should use shlex.split()."""
+ runner = ScriptRunner()
+ env = {"PATH": "/usr/bin"}
+
+ with patch("sys.platform", "linux"), \
+ patch("subprocess.run", return_value=MagicMock(returncode=0)) as mock_run:
+ runner._execute_runtime_command("codex --quiet", "prompt content", env)
+ call_args = mock_run.call_args[0][0]
+ assert "codex" in call_args
+ assert "--quiet" in call_args
+
+ def test_script_runner_has_runtime_command_method(self):
+ """Verify ScriptRunner has _execute_runtime_command method."""
+ runner = ScriptRunner()
+ assert hasattr(runner, "_execute_runtime_command")
+ assert callable(runner._execute_runtime_command)
+
+
+class TestIsWindowsProperty:
+ """Test _is_windows property on RuntimeManager."""
+
+ def test_is_windows_true(self):
+ manager = _make_manager("win32")
+ with patch("sys.platform", "win32"):
+ assert manager._is_windows is True
+
+ def test_is_windows_false_on_macos(self):
+ manager = _make_manager("darwin")
+ with patch("sys.platform", "darwin"):
+ assert manager._is_windows is False
+
+ def test_is_windows_false_on_linux(self):
+ manager = _make_manager("linux")
+ with patch("sys.platform", "linux"):
+ assert manager._is_windows is False
diff --git a/tests/unit/test_script_runner.py b/tests/unit/test_script_runner.py
index eb7e1c66e..5dee6e731 100644
--- a/tests/unit/test_script_runner.py
+++ b/tests/unit/test_script_runner.py
@@ -480,7 +480,7 @@ def test_compile_with_dependency_resolution(self, mock_file, mock_mkdir):
# Verify file was opened with resolved path
mock_file.assert_called()
opened_path = mock_file.call_args_list[0][0][0]
- assert str(opened_path) == "apm_modules/microsoft/apm-sample-package/test.prompt.md"
+ assert str(opened_path).replace("\\", "/") == "apm_modules/microsoft/apm-sample-package/test.prompt.md"
class TestScriptRunnerAutoInstall:
diff --git a/tests/unit/test_uninstall_transitive_cleanup.py b/tests/unit/test_uninstall_transitive_cleanup.py
index 708607a11..1d85faf4f 100644
--- a/tests/unit/test_uninstall_transitive_cleanup.py
+++ b/tests/unit/test_uninstall_transitive_cleanup.py
@@ -73,258 +73,285 @@ def test_uninstall_removes_transitive_dep(self):
"""Uninstalling pkg-a also removes pkg-a's transitive dep pkg-b."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
+ try:
+ root = Path(tmp_dir)
- # Setup: pkg-a depends on (transitive) pkg-b
- _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
- _make_apm_modules_dir(root, "acme/pkg-a")
- _make_apm_modules_dir(root, "acme/pkg-b") # transitive dep
+ # Setup: pkg-a depends on (transitive) pkg-b
+ _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
+ _make_apm_modules_dir(root, "acme/pkg-a")
+ _make_apm_modules_dir(root, "acme/pkg-b") # transitive dep
- _write_lockfile(root / "apm.lock", [
- LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
- LockedDependency(repo_url="acme/pkg-b", depth=2, resolved_by="acme/pkg-a", resolved_commit="bbb"),
- ])
+ _write_lockfile(root / "apm.lock", [
+ LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
+ LockedDependency(repo_url="acme/pkg-b", depth=2, resolved_by="acme/pkg-a", resolved_commit="bbb"),
+ ])
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
- assert result.exit_code == 0
- # Both direct and transitive should be removed
- assert not (root / "apm_modules" / "acme" / "pkg-a").exists()
- assert not (root / "apm_modules" / "acme" / "pkg-b").exists()
- assert "transitive dependency" in result.output.lower()
+ assert result.exit_code == 0
+ # Both direct and transitive should be removed
+ assert not (root / "apm_modules" / "acme" / "pkg-a").exists()
+ assert not (root / "apm_modules" / "acme" / "pkg-b").exists()
+ assert "transitive dependency" in result.output.lower()
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
def test_uninstall_keeps_shared_transitive_dep(self):
"""Transitive dep used by another remaining package is NOT removed."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
-
- # Setup: both pkg-a and pkg-c depend on (transitive) shared-lib
- _write_apm_yml(root / "apm.yml", ["acme/pkg-a", "acme/pkg-c"])
- _make_apm_modules_dir(root, "acme/pkg-a")
- _make_apm_modules_dir(root, "acme/pkg-c")
- _make_apm_modules_dir(root, "acme/shared-lib")
-
- _write_lockfile(root / "apm.lock", [
- LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
- LockedDependency(repo_url="acme/pkg-c", depth=1, resolved_commit="ccc"),
- LockedDependency(repo_url="acme/shared-lib", depth=2, resolved_by="acme/pkg-a", resolved_commit="sss"),
- ])
-
- # Uninstall only pkg-a
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
-
- assert result.exit_code == 0
- assert not (root / "apm_modules" / "acme" / "pkg-a").exists()
- # shared-lib is still used by pkg-c (it's in remaining deps via lockfile)
- # Actually, the lockfile says resolved_by=acme/pkg-a, and pkg-c doesn't
- # explicitly declare it. But shared-lib is a separate lockfile entry.
- # Our orphan detection checks remaining_deps which includes pkg-c and
- # all non-orphaned lockfile entries. Since shared-lib is flagged as orphan
- # (resolved_by=acme/pkg-a), it WILL be removed. This is correct npm behavior:
- # if pkg-c truly needs shared-lib, it should declare it in its own apm.yml,
- # which would show up as resolved_by=acme/pkg-c in the lockfile.
- assert not (root / "apm_modules" / "acme" / "shared-lib").exists()
+ try:
+ root = Path(tmp_dir)
+
+ # Setup: both pkg-a and pkg-c depend on (transitive) shared-lib
+ _write_apm_yml(root / "apm.yml", ["acme/pkg-a", "acme/pkg-c"])
+ _make_apm_modules_dir(root, "acme/pkg-a")
+ _make_apm_modules_dir(root, "acme/pkg-c")
+ _make_apm_modules_dir(root, "acme/shared-lib")
+
+ _write_lockfile(root / "apm.lock", [
+ LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
+ LockedDependency(repo_url="acme/pkg-c", depth=1, resolved_commit="ccc"),
+ LockedDependency(repo_url="acme/shared-lib", depth=2, resolved_by="acme/pkg-a", resolved_commit="sss"),
+ ])
+
+ # Uninstall only pkg-a
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
+
+ assert result.exit_code == 0
+ assert not (root / "apm_modules" / "acme" / "pkg-a").exists()
+                # Orphan detection checks remaining_deps (pkg-c plus all non-orphaned
+                # lockfile entries). shared-lib's lockfile entry says resolved_by=acme/pkg-a
+                # and pkg-c never declares it itself, so shared-lib is flagged as an orphan
+                # and removed. This is correct npm behavior: if pkg-c truly needs shared-lib,
+                # it should declare it in its own apm.yml, which would show up as
+                # resolved_by=acme/pkg-c in the lockfile.
+ assert not (root / "apm_modules" / "acme" / "shared-lib").exists()
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
def test_uninstall_removes_deeply_nested_transitive_deps(self):
"""Transitive deps of transitive deps are also removed (recursive)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
+ try:
+ root = Path(tmp_dir)
- # Setup: pkg-a -> pkg-b -> pkg-c (chain of transitive deps)
- _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
- _make_apm_modules_dir(root, "acme/pkg-a")
- _make_apm_modules_dir(root, "acme/pkg-b")
- _make_apm_modules_dir(root, "acme/pkg-c")
+ # Setup: pkg-a -> pkg-b -> pkg-c (chain of transitive deps)
+ _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
+ _make_apm_modules_dir(root, "acme/pkg-a")
+ _make_apm_modules_dir(root, "acme/pkg-b")
+ _make_apm_modules_dir(root, "acme/pkg-c")
- _write_lockfile(root / "apm.lock", [
- LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
- LockedDependency(repo_url="acme/pkg-b", depth=2, resolved_by="acme/pkg-a", resolved_commit="bbb"),
- LockedDependency(repo_url="acme/pkg-c", depth=3, resolved_by="acme/pkg-b", resolved_commit="ccc"),
- ])
+ _write_lockfile(root / "apm.lock", [
+ LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
+ LockedDependency(repo_url="acme/pkg-b", depth=2, resolved_by="acme/pkg-a", resolved_commit="bbb"),
+ LockedDependency(repo_url="acme/pkg-c", depth=3, resolved_by="acme/pkg-b", resolved_commit="ccc"),
+ ])
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
- assert result.exit_code == 0
- assert not (root / "apm_modules" / "acme" / "pkg-a").exists()
- assert not (root / "apm_modules" / "acme" / "pkg-b").exists()
- assert not (root / "apm_modules" / "acme" / "pkg-c").exists()
+ assert result.exit_code == 0
+ assert not (root / "apm_modules" / "acme" / "pkg-a").exists()
+ assert not (root / "apm_modules" / "acme" / "pkg-b").exists()
+ assert not (root / "apm_modules" / "acme" / "pkg-c").exists()
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
def test_uninstall_updates_lockfile(self):
"""Lockfile is updated to remove uninstalled deps and their transitives."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
+ try:
+ root = Path(tmp_dir)
- _write_apm_yml(root / "apm.yml", ["acme/pkg-a", "acme/pkg-d"])
- _make_apm_modules_dir(root, "acme/pkg-a")
- _make_apm_modules_dir(root, "acme/pkg-b")
- _make_apm_modules_dir(root, "acme/pkg-d")
+ _write_apm_yml(root / "apm.yml", ["acme/pkg-a", "acme/pkg-d"])
+ _make_apm_modules_dir(root, "acme/pkg-a")
+ _make_apm_modules_dir(root, "acme/pkg-b")
+ _make_apm_modules_dir(root, "acme/pkg-d")
- _write_lockfile(root / "apm.lock", [
- LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
- LockedDependency(repo_url="acme/pkg-b", depth=2, resolved_by="acme/pkg-a", resolved_commit="bbb"),
- LockedDependency(repo_url="acme/pkg-d", depth=1, resolved_commit="ddd"),
- ])
+ _write_lockfile(root / "apm.lock", [
+ LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
+ LockedDependency(repo_url="acme/pkg-b", depth=2, resolved_by="acme/pkg-a", resolved_commit="bbb"),
+ LockedDependency(repo_url="acme/pkg-d", depth=1, resolved_commit="ddd"),
+ ])
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
- assert result.exit_code == 0
- # Lockfile should still exist with pkg-d
- updated_lock = LockFile.read(root / "apm.lock")
- assert updated_lock is not None
- assert updated_lock.has_dependency("acme/pkg-d")
- assert not updated_lock.has_dependency("acme/pkg-a")
- assert not updated_lock.has_dependency("acme/pkg-b")
+ assert result.exit_code == 0
+ # Lockfile should still exist with pkg-d
+ updated_lock = LockFile.read(root / "apm.lock")
+ assert updated_lock is not None
+ assert updated_lock.has_dependency("acme/pkg-d")
+ assert not updated_lock.has_dependency("acme/pkg-a")
+ assert not updated_lock.has_dependency("acme/pkg-b")
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
def test_uninstall_removes_lockfile_when_no_deps_remain(self):
"""Lockfile is deleted when all deps are removed."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
+ try:
+ root = Path(tmp_dir)
- _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
- _make_apm_modules_dir(root, "acme/pkg-a")
+ _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
+ _make_apm_modules_dir(root, "acme/pkg-a")
- _write_lockfile(root / "apm.lock", [
- LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
- ])
+ _write_lockfile(root / "apm.lock", [
+ LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
+ ])
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
- assert result.exit_code == 0
- assert not (root / "apm.lock").exists()
+ assert result.exit_code == 0
+ assert not (root / "apm.lock").exists()
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
def test_dry_run_shows_transitive_deps(self):
"""Dry run shows transitive deps that would be removed."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
+ try:
+ root = Path(tmp_dir)
- _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
- _make_apm_modules_dir(root, "acme/pkg-a")
- _make_apm_modules_dir(root, "acme/pkg-b")
+ _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
+ _make_apm_modules_dir(root, "acme/pkg-a")
+ _make_apm_modules_dir(root, "acme/pkg-b")
- _write_lockfile(root / "apm.lock", [
- LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
- LockedDependency(repo_url="acme/pkg-b", depth=2, resolved_by="acme/pkg-a", resolved_commit="bbb"),
- ])
+ _write_lockfile(root / "apm.lock", [
+ LockedDependency(repo_url="acme/pkg-a", depth=1, resolved_commit="aaa"),
+ LockedDependency(repo_url="acme/pkg-b", depth=2, resolved_by="acme/pkg-a", resolved_commit="bbb"),
+ ])
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a", "--dry-run"])
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a", "--dry-run"])
- assert result.exit_code == 0
- assert "acme/pkg-b" in result.output
- assert "transitive" in result.output.lower()
- # Verify nothing was actually removed
- assert (root / "apm_modules" / "acme" / "pkg-a").exists()
- assert (root / "apm_modules" / "acme" / "pkg-b").exists()
+ assert result.exit_code == 0
+ assert "acme/pkg-b" in result.output
+ assert "transitive" in result.output.lower()
+ # Verify nothing was actually removed
+ assert (root / "apm_modules" / "acme" / "pkg-a").exists()
+ assert (root / "apm_modules" / "acme" / "pkg-b").exists()
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
def test_uninstall_no_lockfile_still_works(self):
"""Uninstall works gracefully when no lockfile exists (no transitive cleanup)."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
+ try:
+ root = Path(tmp_dir)
- _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
- _make_apm_modules_dir(root, "acme/pkg-a")
+ _write_apm_yml(root / "apm.yml", ["acme/pkg-a"])
+ _make_apm_modules_dir(root, "acme/pkg-a")
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
- assert result.exit_code == 0
- assert not (root / "apm_modules" / "acme" / "pkg-a").exists()
+ assert result.exit_code == 0
+ assert not (root / "apm_modules" / "acme" / "pkg-a").exists()
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
def test_uninstall_dry_run_supports_object_style_dependency_entries(self):
"""Dry-run accepts dict dependency entries without crashing."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
-
- data = {
- "name": "test-project",
- "version": "1.0.0",
- "dependencies": {
- "apm": [{"git": "acme/pkg-a"}],
- },
- }
- (root / "apm.yml").write_text(
- yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
- )
- _make_apm_modules_dir(root, "acme/pkg-a")
-
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a", "--dry-run"])
-
- assert result.exit_code == 0
- assert "Dry run complete" in result.output
- assert (root / "apm_modules" / "acme" / "pkg-a").exists()
+ try:
+ root = Path(tmp_dir)
+
+ data = {
+ "name": "test-project",
+ "version": "1.0.0",
+ "dependencies": {
+ "apm": [{"git": "acme/pkg-a"}],
+ },
+ }
+ (root / "apm.yml").write_text(
+ yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
+ )
+ _make_apm_modules_dir(root, "acme/pkg-a")
+
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a", "--dry-run"])
+
+ assert result.exit_code == 0
+ assert "Dry run complete" in result.output
+ assert (root / "apm_modules" / "acme" / "pkg-a").exists()
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
def test_uninstall_reintegrates_remaining_object_style_dependency_from_canonical_path(self):
"""Remaining dict-style deps re-integrate from DependencyReference install paths."""
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
- root = Path(tmp_dir)
-
- remaining_dep_entry = {
- "git": "acme/pkg-b",
- "path": "prompts/review.prompt.md",
- }
- data = {
- "name": "test-project",
- "version": "1.0.0",
- "dependencies": {
- "apm": [
- {"git": "acme/pkg-a"},
- remaining_dep_entry,
- ],
- },
- }
- (root / "apm.yml").write_text(
- yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
- )
-
- _make_apm_modules_dir(root, "acme/pkg-a")
- remaining_ref = DependencyReference.parse_from_dict(remaining_dep_entry)
- remaining_install_path = remaining_ref.get_install_path(Path("apm_modules"))
- (root / remaining_install_path).mkdir(parents=True, exist_ok=True)
-
- observed_paths = []
-
- def _capture_validate(path: Path):
- observed_paths.append(path)
- return SimpleNamespace(
- package=APMPackage(name="pkg-b-review", version="1.0.0"),
- package_type=None,
+ try:
+ root = Path(tmp_dir)
+
+ remaining_dep_entry = {
+ "git": "acme/pkg-b",
+ "path": "prompts/review.prompt.md",
+ }
+ data = {
+ "name": "test-project",
+ "version": "1.0.0",
+ "dependencies": {
+ "apm": [
+ {"git": "acme/pkg-a"},
+ remaining_dep_entry,
+ ],
+ },
+ }
+ (root / "apm.yml").write_text(
+ yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
)
- with patch(
- "apm_cli.models.apm_package.validate_apm_package",
- side_effect=_capture_validate,
- ), patch(
- "apm_cli.core.target_detection.detect_target",
- return_value=(None, None),
- ), patch(
- "apm_cli.core.target_detection.should_integrate_claude",
- return_value=False,
- ), patch(
- "apm_cli.integration.prompt_integrator.PromptIntegrator.should_integrate",
- return_value=False,
- ), patch(
- "apm_cli.integration.agent_integrator.AgentIntegrator.should_integrate",
- return_value=False,
- ), patch(
- "apm_cli.integration.skill_integrator.SkillIntegrator.integrate_package_skill",
- return_value=None,
- ), patch(
- "apm_cli.integration.command_integrator.CommandIntegrator.integrate_package_commands",
- return_value=None,
- ), patch(
- "apm_cli.integration.hook_integrator.HookIntegrator.integrate_package_hooks",
- return_value=None,
- ), patch(
- "apm_cli.integration.instruction_integrator.InstructionIntegrator.integrate_package_instructions",
- return_value=None,
- ):
- result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
-
- assert result.exit_code == 0
- assert remaining_install_path in observed_paths
+ _make_apm_modules_dir(root, "acme/pkg-a")
+ remaining_ref = DependencyReference.parse_from_dict(remaining_dep_entry)
+ remaining_install_path = remaining_ref.get_install_path(Path("apm_modules"))
+ (root / remaining_install_path).mkdir(parents=True, exist_ok=True)
+
+ observed_paths = []
+
+ def _capture_validate(path: Path):
+ observed_paths.append(path)
+ return SimpleNamespace(
+ package=APMPackage(name="pkg-b-review", version="1.0.0"),
+ package_type=None,
+ )
+
+ with patch(
+ "apm_cli.models.apm_package.validate_apm_package",
+ side_effect=_capture_validate,
+ ), patch(
+ "apm_cli.core.target_detection.detect_target",
+ return_value=(None, None),
+ ), patch(
+ "apm_cli.core.target_detection.should_integrate_claude",
+ return_value=False,
+ ), patch(
+ "apm_cli.integration.prompt_integrator.PromptIntegrator.should_integrate",
+ return_value=False,
+ ), patch(
+ "apm_cli.integration.agent_integrator.AgentIntegrator.should_integrate",
+ return_value=False,
+ ), patch(
+ "apm_cli.integration.skill_integrator.SkillIntegrator.integrate_package_skill",
+ return_value=None,
+ ), patch(
+ "apm_cli.integration.command_integrator.CommandIntegrator.integrate_package_commands",
+ return_value=None,
+ ), patch(
+ "apm_cli.integration.hook_integrator.HookIntegrator.integrate_package_hooks",
+ return_value=None,
+ ), patch(
+ "apm_cli.integration.instruction_integrator.InstructionIntegrator.integrate_package_instructions",
+ return_value=None,
+ ):
+ result = self.runner.invoke(cli, ["uninstall", "acme/pkg-a"])
+
+ assert result.exit_code == 0
+ assert remaining_install_path in observed_paths
+ finally:
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # restore CWD before TemporaryDirectory cleanup
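Every test in this class now carries the same try/finally dance: chdir into the TemporaryDirectory, run the test body, then chdir back so cleanup can succeed (Windows refuses to delete a directory that is still the process CWD, which is presumably why the restore comment appears on every finally). If the pattern keeps spreading, a small context manager would remove the duplication. A minimal sketch — `pushd` is a hypothetical helper name, not something in this codebase:

```python
# Sketch of a reusable CWD guard that could replace the repeated try/finally
# blocks above. The name `pushd` is hypothetical, not part of this codebase.
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path

@contextmanager
def pushd(path):
    """Temporarily chdir into `path`; always restore the previous CWD."""
    prev = os.getcwd()
    os.chdir(path)
    try:
        yield Path(path)
    finally:
        os.chdir(prev)  # restore CWD before TemporaryDirectory cleanup

# Usage inside a test body:
with tempfile.TemporaryDirectory() as tmp_dir, pushd(tmp_dir) as root:
    ...  # invoke the CLI; CWD is restored even if an assertion fails
```

Restoring the previous CWD (rather than the test file's directory, as the diff does) is the more common convention; either satisfies the cleanup requirement.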
diff --git a/tests/unit/test_update_command.py b/tests/unit/test_update_command.py
new file mode 100644
index 000000000..5a204524c
--- /dev/null
+++ b/tests/unit/test_update_command.py
@@ -0,0 +1,97 @@
+"""Tests for the platform-aware update command."""
+
+import unittest
+from unittest.mock import Mock, patch
+
+from click.testing import CliRunner
+
+import apm_cli.commands.update as update_module
+from apm_cli.cli import cli
+
+
+class TestUpdateCommand(unittest.TestCase):
+ """Verify update command behavior across supported installer platforms."""
+
+ def setUp(self):
+ self.runner = CliRunner()
+
+ def test_manual_update_command_uses_windows_installer(self):
+ """Windows manual update instructions should point to install.ps1."""
+ with patch.object(update_module.sys, "platform", "win32"):
+ command = update_module._get_manual_update_command()
+
+ self.assertIn("install.ps1", command)
+ self.assertIn("powershell", command.lower())
+
+ @patch("requests.get")
+ @patch("subprocess.run")
+ @patch("apm_cli.commands.update.get_version", return_value="0.6.3")
+ @patch("apm_cli.commands.update.shutil.which", return_value="powershell.exe")
+ @patch("apm_cli.commands.update.os.chmod")
+ @patch("apm_cli.utils.version_checker.get_latest_version_from_github", return_value="0.7.0")
+ def test_update_uses_powershell_installer_on_windows(
+ self,
+ mock_latest,
+ mock_chmod,
+ mock_which,
+ mock_version,
+ mock_run,
+ mock_get,
+ ):
+ """Windows updates should execute the PowerShell installer path."""
+ mock_response = Mock()
+ mock_response.text = "Write-Host 'install'"
+ mock_response.raise_for_status.return_value = None
+ mock_get.return_value = mock_response
+ mock_run.return_value = Mock(returncode=0)
+
+ with patch.object(update_module.sys, "platform", "win32"):
+ result = self.runner.invoke(cli, ["update"])
+
+ self.assertEqual(result.exit_code, 0)
+ self.assertIn("Successfully updated to version 0.7.0", result.output)
+ mock_get.assert_called_once()
+ self.assertTrue(mock_get.call_args.args[0].endswith("install.ps1"))
+ mock_run.assert_called_once()
+ run_command = mock_run.call_args.args[0]
+ self.assertEqual(run_command[:3], ["powershell.exe", "-ExecutionPolicy", "Bypass"])
+ self.assertEqual(run_command[3], "-File")
+ mock_chmod.assert_not_called()
+
+ @patch("requests.get")
+ @patch("subprocess.run")
+ @patch("apm_cli.commands.update.get_version", return_value="0.6.3")
+ @patch("apm_cli.commands.update.os.chmod")
+ @patch("apm_cli.utils.version_checker.get_latest_version_from_github", return_value="0.7.0")
+ def test_update_uses_shell_installer_on_unix(
+ self,
+ mock_latest,
+ mock_chmod,
+ mock_version,
+ mock_run,
+ mock_get,
+ ):
+ """Unix updates should continue to execute the shell installer path."""
+ mock_response = Mock()
+ mock_response.text = "echo install"
+ mock_response.raise_for_status.return_value = None
+ mock_get.return_value = mock_response
+ mock_run.return_value = Mock(returncode=0)
+
+ with patch.object(update_module.sys, "platform", "darwin"), \
+ patch("apm_cli.commands.update.os.path.exists", return_value=True):
+ result = self.runner.invoke(cli, ["update"])
+
+ self.assertEqual(result.exit_code, 0)
+ self.assertIn("Successfully updated to version 0.7.0", result.output)
+ mock_get.assert_called_once()
+ self.assertTrue(mock_get.call_args.args[0].endswith("install.sh"))
+ mock_run.assert_called_once()
+ run_command = mock_run.call_args.args[0]
+ self.assertEqual(run_command[0], "/bin/sh")
+ self.assertTrue(run_command[1].endswith(".sh"))
+ mock_chmod.assert_called_once()
+
+
+if __name__ == "__main__":
+ unittest.main()
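Together, the two update tests pin down the installer dispatch: on `win32` the command downloads `install.ps1` and runs it through `powershell.exe -ExecutionPolicy Bypass -File`, while on Unix it downloads `install.sh`, chmods it executable, and runs it with `/bin/sh`. A sketch of that branch, with hypothetical names (`_run_installer`, `_download`, `INSTALL_BASE_URL`) since the real module body isn't part of this diff:

```python
# Sketch of the dispatch the tests above exercise. Helper names and the
# download URL are assumptions; only the observable calls (requests.get,
# shutil.which, os.chmod, subprocess.run argv) mirror what the tests assert.
import os
import shutil
import subprocess
import sys
import tempfile

import requests

INSTALL_BASE_URL = "https://example.invalid/apm"  # hypothetical placeholder

def _download(url: str, suffix: str) -> str:
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    fd, path = tempfile.mkstemp(suffix=suffix)
    with os.fdopen(fd, "w") as fh:
        fh.write(resp.text)
    return path

def _run_installer() -> None:
    if sys.platform == "win32":
        script = _download(f"{INSTALL_BASE_URL}/install.ps1", ".ps1")
        powershell = shutil.which("powershell.exe") or "powershell.exe"
        # Bypass sidesteps restrictive default execution policies for this call only.
        subprocess.run(
            [powershell, "-ExecutionPolicy", "Bypass", "-File", script],
            check=True,
        )
    else:
        script = _download(f"{INSTALL_BASE_URL}/install.sh", ".sh")
        os.chmod(script, 0o755)  # tests assert chmod happens on Unix only
        subprocess.run(["/bin/sh", script], check=True)
```

Note that the Windows test asserts `os.chmod` is *not* called, so the chmod must stay inside the Unix branch rather than running unconditionally.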
diff --git a/tests/unit/test_version_checker.py b/tests/unit/test_version_checker.py
index 3aa8b4e8a..39769510b 100644
--- a/tests/unit/test_version_checker.py
+++ b/tests/unit/test_version_checker.py
@@ -276,5 +276,25 @@ def test_fetch_failure(self, mock_save, mock_fetch, mock_should_check):
mock_save.assert_called_once()
+class TestCachePathPlatform(unittest.TestCase):
+ """Test platform-specific cache path selection."""
+
+ @patch("pathlib.Path.mkdir")
+ @patch("pathlib.Path.home", return_value=Path("/home/user"))
+ @patch("sys.platform", "linux")
+ def test_unix_cache_path(self, mock_home, mock_mkdir):
+ from apm_cli.utils.version_checker import get_update_cache_path
+ result = get_update_cache_path()
+ assert result == Path("/home/user") / ".cache" / "apm" / "last_version_check"
+
+ @patch("pathlib.Path.mkdir")
+ @patch("pathlib.Path.home", return_value=Path("C:/Users/testuser"))
+ @patch("sys.platform", "win32")
+ def test_windows_cache_path(self, mock_home, mock_mkdir):
+ from apm_cli.utils.version_checker import get_update_cache_path
+ result = get_update_cache_path()
+ assert result == Path("C:/Users/testuser") / "AppData" / "Local" / "apm" / "cache" / "last_version_check"
+
+
if __name__ == "__main__":
unittest.main()
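For context, the implementation shape these two cache-path tests imply: both branches derive from `Path.home()` (which is what makes the `pathlib.Path.home` patch effective) rather than reading `LOCALAPPDATA`. A minimal sketch, not the module's actual code:

```python
# Shape of get_update_cache_path() implied by the tests. Deriving the Windows
# path from Path.home() instead of os.environ["LOCALAPPDATA"] is what lets the
# tests patch pathlib.Path.home directly. A sketch only.
import sys
from pathlib import Path

def get_update_cache_path() -> Path:
    if sys.platform == "win32":
        cache_dir = Path.home() / "AppData" / "Local" / "apm" / "cache"
    else:
        cache_dir = Path.home() / ".cache" / "apm"
    cache_dir.mkdir(parents=True, exist_ok=True)  # the tests patch Path.mkdir
    return cache_dir / "last_version_check"
```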