97 changes: 84 additions & 13 deletions apps/backend/core/client.py
@@ -16,6 +16,7 @@
import json
import logging
import os
import platform
import shutil
import subprocess
import threading
@@ -24,11 +25,11 @@
from typing import Any

from core.platform import (
get_claude_detection_paths_structured,

Check failure - GitHub Actions / Python (Ruff): apps/backend/core/client.py:28:5: F401 `core.platform.get_claude_detection_paths_structured` imported but unused
get_comspec_path,

Check failure - GitHub Actions / Python (Ruff): apps/backend/core/client.py:29:5: F401 `core.platform.get_comspec_path` imported but unused
is_macos,

Check failure - GitHub Actions / Python (Ruff): apps/backend/core/client.py:30:5: F401 `core.platform.is_macos` imported but unused
is_windows,
validate_cli_path,

Check failure - GitHub Actions / Python (Ruff): apps/backend/core/client.py:32:5: F401 `core.platform.validate_cli_path` imported but unused
)

logger = logging.getLogger(__name__)
@@ -140,18 +141,83 @@
_CLI_CACHE_LOCK = threading.Lock()


def _get_claude_detection_paths() -> dict[str, list[str] | str]:
def _get_claude_detection_paths() -> dict[str, list[str]]:
"""
Get all candidate paths for Claude CLI detection.

This is a thin wrapper around the platform module's implementation.
See core/platform/__init__.py:get_claude_detection_paths_structured()
for the canonical implementation.
Returns platform-specific paths where Claude CLI might be installed.

IMPORTANT: This function mirrors the frontend's getClaudeDetectionPaths()
in apps/frontend/src/main/cli-tool-manager.ts. Both implementations MUST
be kept in sync to ensure consistent detection behavior across the
Python backend and Electron frontend.

When adding new detection paths, update BOTH:
1. This function (_get_claude_detection_paths in client.py)
2. getClaudeDetectionPaths() in cli-tool-manager.ts

Returns:
Dict with 'homebrew', 'platform', and 'nvm_versions_dir' keys
Dict with 'homebrew' and 'platform' path lists, plus the 'nvm_versions_dir' path
"""
return get_claude_detection_paths_structured()
home_dir = Path.home()
is_windows = platform.system() == "Windows"

🧹 Nitpick | 🔵 Trivial

Inconsistent platform detection pattern - local variables shadow imported functions.

Multiple places create a local is_windows variable using platform.system() == "Windows", which shadows the is_windows function imported from core.platform. This violates the coding guideline to use platform abstraction functions and creates confusion.

♻️ Proposed fix

Use the imported is_windows() function directly instead of creating local variables:

 def _get_claude_detection_paths() -> dict[str, list[str]]:
     ...
     home_dir = Path.home()
-    is_windows = platform.system() == "Windows"
-
-    ...
-
-    if is_windows:
+    if is_windows():
         platform_paths = [

Apply similar changes to _validate_claude_cli() (line 248) and find_claude_cli() (line 320).

This also allows removing import platform from line 19 if no other usages remain.

As per coding guidelines: "Do not check process.platform directly in code - always import platform detection functions from the platform abstraction module."

Also applies to: 248-248, 320-320

🤖 Prompt for AI Agents
In `@apps/backend/core/client.py` at line 163, Replace local platform checks that
assign is_windows = platform.system() == "Windows" with calls to the platform
abstraction is_windows() function imported from core.platform; specifically
update the occurrences in the module-level code, the _validate_claude_cli()
function, and the find_claude_cli() function to use is_windows() directly (and
remove the unused platform import if there are no other references). Ensure you
do not shadow the imported is_windows symbol with a local variable and keep
existing logic branches intact while switching to is_windows().
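
For reference, a minimal sketch of the abstraction pattern the reviewer is pointing to: thin helpers that wrap the platform check once, so call sites branch on `is_windows()` / `is_macos()` instead of re-deriving (and shadowing) them locally. The helper bodies below are assumptions for illustration; the canonical versions live in `core/platform`.

```python
# Illustrative sketch only; the real helpers are defined in core/platform.
import platform


def is_windows() -> bool:
    """True when running on Windows."""
    return platform.system() == "Windows"


def is_macos() -> bool:
    """True when running on macOS (Darwin)."""
    return platform.system() == "Darwin"


# Call sites then read as:
#     if is_windows():
#         ...choose Windows-specific paths...
```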


homebrew_paths = [
"/opt/homebrew/bin/claude", # Apple Silicon
"/usr/local/bin/claude", # Intel Mac
]

if is_windows:
platform_paths = [
str(home_dir / "AppData" / "Local" / "Programs" / "claude" / "claude.exe"),
str(home_dir / "AppData" / "Roaming" / "npm" / "claude.cmd"),
str(home_dir / ".local" / "bin" / "claude.exe"),
"C:\\Program Files\\Claude\\claude.exe",
"C:\\Program Files (x86)\\Claude\\claude.exe",
]
else:
platform_paths = [
str(home_dir / ".local" / "bin" / "claude"),
str(home_dir / "bin" / "claude"),
]

nvm_versions_dir = str(home_dir / ".nvm" / "versions" / "node")

return {
"homebrew": homebrew_paths,
"platform": platform_paths,
"nvm_versions_dir": nvm_versions_dir,
}


def _is_secure_path(path_str: str) -> bool:
"""
Validate that a path doesn't contain dangerous characters.

Prevents command injection attacks by rejecting paths with shell metacharacters,
directory traversal patterns, or environment variable expansion.

Args:
path_str: Path to validate

Returns:
True if the path is safe, False otherwise
"""
import re

dangerous_patterns = [
r'[;&|`${}[\]<>!"^]', # Shell metacharacters
r"%[^%]+%", # Windows environment variable expansion
r"\.\./", # Unix directory traversal
r"\.\.\\", # Windows directory traversal
r"[\r\n]", # Newlines (command injection)
]

for pattern in dangerous_patterns:
if re.search(pattern, path_str):
return False

return True
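
A quick, hedged illustration of how this validator behaves; the paths below are made-up examples, not ones the detector actually probes.

```python
# Hypothetical inputs to _is_secure_path; results follow from the patterns above.
print(_is_secure_path("/usr/local/bin/claude"))                # True
print(_is_secure_path(r"C:\Program Files\Claude\claude.exe"))  # True
print(_is_secure_path("/usr/bin/claude; rm -rf /"))            # False (shell metacharacter)
print(_is_secure_path(r"..\..\Windows\System32\cmd.exe"))      # False (directory traversal)
print(_is_secure_path("%TEMP%\\claude.exe"))                   # False (env-var expansion)
```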


def _validate_claude_cli(cli_path: str) -> tuple[bool, str | None]:
@@ -174,11 +240,13 @@
import re

# Security validation: reject paths with shell metacharacters or directory traversal
if not validate_cli_path(cli_path):
if not _is_secure_path(cli_path):
logger.warning(f"Rejecting insecure Claude CLI path: {cli_path}")
return False, None

try:
is_windows = platform.system() == "Windows"

# Augment PATH with the CLI directory for proper resolution
env = os.environ.copy()
cli_dir = os.path.dirname(cli_path)
@@ -189,9 +257,11 @@
# /d = disable AutoRun registry commands
# /s = strip first and last quotes, preserving inner quotes
# /c = run command then terminate
if is_windows() and cli_path.lower().endswith((".cmd", ".bat")):
# Get cmd.exe path from platform module
cmd_exe = get_comspec_path()
if is_windows and cli_path.lower().endswith((".cmd", ".bat")):
# Get cmd.exe path from environment or use default
cmd_exe = os.environ.get("ComSpec") or os.path.join(
os.environ.get("SystemRoot", "C:\\Windows"), "System32", "cmd.exe"
)
# Use double-quoted command line for paths with spaces
cmd_line = f'""{cli_path}" --version"'
result = subprocess.run(
@@ -209,7 +279,7 @@
text=True,
timeout=5,
env=env,
creationflags=subprocess.CREATE_NO_WINDOW if is_windows() else 0,
creationflags=subprocess.CREATE_NO_WINDOW if is_windows else 0,
)

if result.returncode == 0:
@@ -247,6 +317,7 @@
logger.debug(f"Using cached Claude CLI path: {cached}")
return cached

is_windows = platform.system() == "Windows"
paths = _get_claude_detection_paths()

# 1. Check environment variable override
@@ -272,7 +343,7 @@
return which_path

# 3. Homebrew paths (macOS)
if is_macos():
if platform.system() == "Darwin":

🧹 Nitpick | 🔵 Trivial

Use is_macos() instead of direct platform check.

This line uses platform.system() == "Darwin" directly instead of the imported is_macos() function from core.platform.

♻️ Proposed fix
     # 3. Homebrew paths (macOS)
-    if platform.system() == "Darwin":
+    if is_macos():
         for hb_path in paths["homebrew"]:
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if platform.system() == "Darwin":
# 3. Homebrew paths (macOS)
if is_macos():
for hb_path in paths["homebrew"]:
🤖 Prompt for AI Agents
In `@apps/backend/core/client.py` at line 346, Replace the direct platform check
platform.system() == "Darwin" with the imported helper is_macos() from
core.platform; locate the conditional in client.py (the if using
platform.system()) and change it to call is_macos() so the code uses the
centralized platform detection logic (ensure is_macos is imported where used).

for hb_path in paths["homebrew"]:
if Path(hb_path).exists():
valid, version = _validate_claude_cli(hb_path)
@@ -283,7 +354,7 @@
return hb_path

# 4. NVM paths (Unix only) - check Node.js version manager installations
if not is_windows():
if not is_windows:
nvm_dir = Path(paths["nvm_versions_dir"])
if nvm_dir.exists():
try:
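One detail from the `_validate_claude_cli()` hunk above worth calling out: on Windows, `.cmd`/`.bat` shims are run through an explicit `cmd.exe` resolved from `ComSpec`, with `/d /s /c` and an outer pair of doubled quotes so paths containing spaces survive. A standalone, hedged sketch of that pattern (the shim path is hypothetical, and the exact argument list used in the PR is cut off by the hunk boundary):

```python
# Sketch of the cmd.exe wrapper pattern; not the PR's exact call.
import os
import platform
import subprocess

cli_path = r"C:\Users\dev\AppData\Roaming\npm\claude.cmd"  # hypothetical shim path

if platform.system() == "Windows":
    # Resolve cmd.exe from ComSpec, falling back to the SystemRoot default.
    cmd_exe = os.environ.get("ComSpec") or os.path.join(
        os.environ.get("SystemRoot", "C:\\Windows"), "System32", "cmd.exe"
    )
    # /d = skip AutoRun commands, /s = preserve inner quotes, /c = run then terminate.
    # The doubled outer quotes keep a path with spaces intact under cmd.exe parsing.
    result = subprocess.run(
        [cmd_exe, "/d", "/s", "/c", f'""{cli_path}" --version"'],
        capture_output=True,
        text=True,
        timeout=5,
    )
    print(result.returncode, result.stdout.strip())
```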
51 changes: 48 additions & 3 deletions apps/backend/core/dependency_validator.py
@@ -8,6 +8,8 @@
import sys
from pathlib import Path

from core.platform import is_linux, is_windows


def validate_platform_dependencies() -> None:
"""
@@ -18,14 +20,21 @@ def validate_platform_dependencies() -> None:
with helpful installation instructions.
"""
# Check Windows-specific dependencies
# pywin32 is required on all Python versions on Windows (ACS-306)
# The MCP library unconditionally imports win32api on Windows
if sys.platform == "win32":
if is_windows() and sys.version_info >= (3, 12):
try:
import pywintypes # noqa: F401
except ImportError:
_exit_with_pywin32_error()

# Check Linux-specific dependencies (ACS-310)
# Note: secretstorage is optional for app functionality (falls back to .env),
# but we validate it to ensure proper OAuth token storage via keyring
if is_linux():
try:
import secretstorage # noqa: F401
except ImportError:
_warn_missing_secretstorage()


def _exit_with_pywin32_error() -> None:
"""Exit with helpful error message for missing pywin32."""
@@ -76,3 +85,39 @@ def _exit_with_pywin32_error() -> None:
"\n"
f"Current Python: {sys.executable}\n"
)


def _warn_missing_secretstorage() -> None:
"""Emit warning message for missing secretstorage.

Note: This is a warning, not a hard error - the app will fall back to .env
file storage for OAuth tokens. We warn users to ensure they understand the
security implications.
"""
# Use sys.prefix to detect the virtual environment path
venv_activate = Path(sys.prefix) / "bin" / "activate"

sys.stderr.write(
"Warning: Linux dependency 'secretstorage' is not installed.\n"
"\n"
"Auto Claude can use secretstorage for secure OAuth token storage via\n"
"the system keyring (gnome-keyring, kwallet, etc.). Without it, tokens\n"
"will be stored in plaintext in your .env file.\n"
"\n"
"To enable keyring integration:\n"
"1. Activate your virtual environment:\n"
f" source {venv_activate}\n"
"\n"
"2. Install secretstorage:\n"
" pip install 'secretstorage>=3.3.3'\n"
"\n"
" Or reinstall all dependencies:\n"
" pip install -r requirements.txt\n"
"\n"
"Note: The app will continue to work, but OAuth tokens will be stored\n"
"in your .env file instead of the system keyring.\n"
"\n"
f"Current Python: {sys.executable}\n"
)
sys.stderr.flush()
# Continue execution - this is a warning, not a blocking error
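
To make the fallback concrete, here is a small sketch of how a caller might prefer the system keyring (backed by `secretstorage` on Linux) and drop back to a `.env`-sourced environment variable when it is unavailable. The function name, service name, and variable name are illustrative assumptions, not Auto Claude's actual API.

```python
# Illustrative only: names and storage layout are assumptions, not the app's real API.
import os


def load_oauth_token(service: str = "auto-claude", key: str = "oauth_token") -> str | None:
    """Prefer the system keyring; fall back to an environment/.env value."""
    try:
        import keyring  # uses secretstorage on most Linux desktops

        token = keyring.get_password(service, key)
        if token:
            return token
    except Exception:
        # secretstorage missing or no keyring daemon running: fall through to .env.
        pass
    return os.environ.get("OAUTH_TOKEN")
```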
5 changes: 1 addition & 4 deletions apps/backend/core/simple_client.py
@@ -96,12 +96,9 @@ def create_simple_client(
"max_turns": max_turns,
"cwd": str(cwd.resolve()) if cwd else None,
"env": sdk_env,
"max_thinking_tokens": max_thinking_tokens,
}

# Only add max_thinking_tokens if not None (Haiku doesn't support extended thinking)
if max_thinking_tokens is not None:
options_kwargs["max_thinking_tokens"] = max_thinking_tokens

# Add CLI path if found
if cli_path:
options_kwargs["cli_path"] = cli_path
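For context on the removed guard: passing `max_thinking_tokens=None` explicitly is not the same as leaving the key out when the consumer merges kwargs over defaults. A generic sketch of that difference (this is not the Claude SDK's actual behavior, just the dictionary semantics the old conditional was guarding against):

```python
# Generic illustration of "key absent" vs "key explicitly None" in kwargs merging.
def build_options(**overrides):
    defaults = {"max_thinking_tokens": 8192, "max_turns": 10}
    return {**defaults, **overrides}


print(build_options())                          # keeps the 8192 default
print(build_options(max_thinking_tokens=None))  # default replaced by None
```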
104 changes: 0 additions & 104 deletions apps/backend/runners/github/orchestrator.py
@@ -660,122 +660,18 @@

if not has_commits and not has_file_changes:
base_sha = previous_review.reviewed_commit_sha[:8]

# Check if CI status has changed since last review
# If CI was failing before but now passes, we need to update the verdict
current_failing = ci_status.get("failing", 0)
current_awaiting = ci_status.get("awaiting_approval", 0)

# Helper to detect CI-related blockers (includes workflows pending)
def is_ci_blocker(b: str) -> bool:
return b.startswith("CI Failed:") or b.startswith(
"Workflows Pending:"
)

previous_blockers = getattr(previous_review, "blockers", [])
previous_was_blocked_by_ci = (
previous_review.verdict == MergeVerdict.BLOCKED
and any(is_ci_blocker(b) for b in previous_blockers)
)

# Determine the appropriate verdict based on current CI status
# CI/Workflow status check (both block merging)
ci_or_workflow_blocking = current_failing > 0 or current_awaiting > 0

if ci_or_workflow_blocking:
# CI is still failing or workflows pending - keep blocked verdict
updated_verdict = MergeVerdict.BLOCKED
if current_failing > 0:
updated_reasoning = (
f"No code changes since last review. "
f"{current_failing} CI check(s) still failing."
)
failed_checks = ci_status.get("failed_checks", [])
ci_note = (
f" Failing: {', '.join(failed_checks)}"
if failed_checks
else ""
)
no_change_summary = (
f"No new commits since last review. "
f"CI status: {current_failing} check(s) failing.{ci_note}"
)
else:
updated_reasoning = (
f"No code changes since last review. "
f"{current_awaiting} workflow(s) awaiting approval."
)
no_change_summary = (
f"No new commits since last review. "
f"{current_awaiting} workflow(s) awaiting maintainer approval."
)
elif previous_was_blocked_by_ci and not ci_or_workflow_blocking:
# CI/Workflows have recovered! Update verdict to reflect this
safe_print(
"[Followup] CI recovered - updating verdict from BLOCKED",
flush=True,
)
# Check for remaining non-CI blockers (use helper defined above)
non_ci_blockers = [
b for b in previous_blockers if not is_ci_blocker(b)
]

# Determine verdict based on findings AND remaining blockers
if non_ci_blockers:
# There are still non-CI blockers - stay blocked
updated_verdict = MergeVerdict.BLOCKED
updated_reasoning = (
"CI checks now passing. Non-CI blockers still remain: "
+ ", ".join(non_ci_blockers[:3])
)
elif previous_review.findings:
# Check finding severity - only low severity is non-blocking
findings = previous_review.findings
high_medium = [
f
for f in findings
if f.severity
in (
ReviewSeverity.HIGH,
ReviewSeverity.MEDIUM,
ReviewSeverity.CRITICAL,
)
]
if high_medium:
# There are blocking findings - needs revision
updated_verdict = MergeVerdict.NEEDS_REVISION
updated_reasoning = f"CI checks now passing. {len(high_medium)} code finding(s) still require attention."
else:
# Only low-severity findings - safe to merge
updated_verdict = MergeVerdict.READY_TO_MERGE
updated_reasoning = f"CI checks now passing. {len(findings)} non-blocking suggestion(s) to consider."
else:
updated_verdict = MergeVerdict.READY_TO_MERGE
updated_reasoning = (
"CI checks now passing. No outstanding code issues."
)
no_change_summary = (
"No new commits since last review. "
"CI checks are now passing. Previous findings still apply."
)
else:
# No CI-related changes, keep previous verdict
updated_verdict = previous_review.verdict
updated_reasoning = "No changes since last review."
no_change_summary = "No new commits since last review. Previous findings still apply."

safe_print(
f"[Followup] No changes since last review at {base_sha}",
flush=True,
)

# Build blockers list - always filter out CI blockers first, then add current
blockers = list(previous_blockers)

Check failure - GitHub Actions / Python (Ruff): apps/backend/runners/github/orchestrator.py:669:33: F821 Undefined name `previous_blockers`
# Remove ALL CI-related blockers (CI Failed + Workflows Pending)
blockers = [b for b in blockers if not is_ci_blocker(b)]

Check failure - GitHub Actions / Python (Ruff): apps/backend/runners/github/orchestrator.py:671:56: F821 Undefined name `is_ci_blocker`

# Add back only currently failing CI checks
if current_failing > 0:

Check failure - GitHub Actions / Python (Ruff): apps/backend/runners/github/orchestrator.py:674:20: F821 Undefined name `current_failing`
failed_checks = ci_status.get("failed_checks", [])
for check_name in failed_checks:
blocker_msg = f"CI Failed: {check_name}"
@@ -783,13 +679,13 @@
blockers.append(blocker_msg)

# Add back workflows pending if any
if current_awaiting > 0:

Check failure - GitHub Actions / Python (Ruff): apps/backend/runners/github/orchestrator.py:682:20: F821 Undefined name `current_awaiting`
blocker_msg = f"Workflows Pending: {current_awaiting} workflow(s) awaiting maintainer approval"

Check failure - GitHub Actions / Python (Ruff): apps/backend/runners/github/orchestrator.py:683:57: F821 Undefined name `current_awaiting`
if blocker_msg not in blockers:
blockers.append(blocker_msg)

# Map verdict to overall_status (consistent with rest of codebase)
if updated_verdict == MergeVerdict.BLOCKED:

Check failure - GitHub Actions / Python (Ruff): apps/backend/runners/github/orchestrator.py:688:20: F821 Undefined name `updated_verdict`
overall_status = "request_changes"
elif updated_verdict == MergeVerdict.NEEDS_REVISION:
overall_status = "request_changes"
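The Ruff F821 failures above follow directly from the deletion: the surviving block still references `previous_blockers`, `is_ci_blocker`, `current_failing`, `current_awaiting`, and `updated_verdict`, which were only defined inside the removed hunk. A minimal sketch of what the remaining code would need, reconstructed from the deleted lines and meant to live inside the same function (a starting point, not the final fix):

```python
# Reconstructed from the removed hunk; the defaults are a sketch, not the intended behavior.
def is_ci_blocker(b: str) -> bool:
    return b.startswith("CI Failed:") or b.startswith("Workflows Pending:")


previous_blockers = getattr(previous_review, "blockers", [])
current_failing = ci_status.get("failing", 0)
current_awaiting = ci_status.get("awaiting_approval", 0)

# Without the removed CI-recovery logic, the simplest consistent default is to
# carry the previous verdict forward; the deleted code computed this dynamically.
updated_verdict = previous_review.verdict
```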
@@ -532,31 +532,12 @@ async def review(self, context: PRContext) -> PRReviewResult:
head_sha, context.pr_number
)
project_root = worktree_path
# Count files in worktree to give user visibility (with limit to avoid slowdown)
MAX_FILE_COUNT = 10000
try:
file_count = 0
for f in worktree_path.rglob("*"):
if f.is_file() and ".git" not in f.parts:
file_count += 1
if file_count >= MAX_FILE_COUNT:
break
except (OSError, PermissionError):
file_count = 0
file_count_str = (
f"{file_count:,}+"
if file_count >= MAX_FILE_COUNT
else f"{file_count:,}"
)
# Always log worktree creation with file count (not gated by DEBUG_MODE)
safe_print(
f"[PRReview] Created temporary worktree: {worktree_path.name} ({file_count_str} files)",
flush=True,
)
safe_print(
f"[PRReview] Worktree contains PR branch HEAD: {head_sha[:8]}",
flush=True,
)
if DEBUG_MODE:
safe_print(
f"[PRReview] DEBUG: Using worktree as "
f"project_root={project_root}",
flush=True,
)
except (RuntimeError, ValueError) as e:
if DEBUG_MODE:
safe_print(