Merged
Commits
21 commits
5f4e347
Design: Test coverage improvement plan
Jan 5, 2026
25f9b53
Create shared test helper modules
Jan 5, 2026
8cd19a4
Add comprehensive tests for Build/Test Workflow
Jan 5, 2026
1bb9aeb
Add comprehensive tests for API Integration merge_handler
Jan 5, 2026
bbae43a
Add comprehensive tests for event_loop_utils.py
Jan 5, 2026
dc8415f
AIML-348: Address P1 code review findings
Jan 6, 2026
78a665e
AIML-348: Fix linting - unused task variable
Jan 6, 2026
208d0e3
AIML-348: Document learnings from event_loop_utils work
Jan 6, 2026
ba0d9d5
AIML-348: Add comprehensive tests for smartfix_agent.py
Jan 6, 2026
76f62ef
Fix linting: remove unused MagicMock import
Jan 6, 2026
7fa9deb
Add beads workflow tracking and update .gitignore
Jan 6, 2026
bdca014
Remove .claude/learnings/2026-01-06.md from tracking
Jan 6, 2026
986d2ec
test: add comprehensive tests for sub_agent_executor (24 tests)
Jan 6, 2026
cc4e9b0
fix: remove trailing blank line from test file
Jan 6, 2026
7a0fcdb
test: Expand github_operations.py test coverage from 45% → 78%
Jan 6, 2026
1da375e
fix: Add assertions for result variables to fix linting errors
Jan 6, 2026
889d500
chore: Update beads metadata after closing test coverage beads
Jan 6, 2026
b2ca523
fix: Add missing subprocess.run mock to test_create_claude_pr_truncat…
Jan 6, 2026
f871370
AIML-348: Remove temporary files from tracking
Jan 6, 2026
14207c6
AIML-348: Update .gitignore for macOS and beads runtime files
Jan 6, 2026
8819640
AIML-348 Implement Copilot's review suggestion to make MAX_PENDING_TA…
Jan 9, 2026
1 change: 1 addition & 0 deletions .beads/.local_version
@@ -0,0 +1 @@
0.29.0
Binary file added .beads/beads.db
Binary file not shown.
16 changes: 16 additions & 0 deletions .beads/issues.jsonl

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions .beads/metadata.json
@@ -0,0 +1,4 @@
{
"database": "beads.db",
"jsonl_export": "issues.jsonl"
}
18 changes: 18 additions & 0 deletions .gitignore
@@ -181,7 +181,25 @@ cython_debug/

# macOS
.DS_Store
.AppleDouble
.LSOverride
._*

# Temporary planning files
plan/
plan.zip

# bv (beads viewer) local config and caches
.bv/

# Beads runtime and temporary files
.beads/daemon.lock
.beads/daemon.pid
.beads/daemon.log
.beads/bd.sock
.beads/beads.left.jsonl
.beads/beads.left.meta.json

# Claude Code local session files
.claude/learnings/
docs/plans/
23 changes: 20 additions & 3 deletions src/smartfix/domains/agents/event_loop_utils.py
@@ -44,6 +44,8 @@
except ImportError:
pass

Copilot AI Jan 9, 2026

The MAX_PENDING_TASKS constant lacks documentation explaining its purpose and rationale. Add a docstring comment explaining that this limit prevents DoS through resource exhaustion by capping the number of pending async tasks during cleanup.

Suggested change
# Upper bound on the number of pending async tasks we will track during event loop cleanup.
# This prevents potential DoS via resource exhaustion by capping how many tasks are awaited.

MAX_PENDING_TASKS = 100


def _run_agent_in_event_loop(coroutine_func, *args, **kwargs):
"""
@@ -124,17 +126,32 @@ def _run_agent_in_event_loop(coroutine_func, *args, **kwargs):
finally:
# Clean up any remaining tasks
pending = asyncio.all_tasks(loop)

# Security: Limit maximum tasks to prevent resource exhaustion
if len(pending) > MAX_PENDING_TASKS:
log(
f"Security Warning: {len(pending)} pending tasks exceeds limit of {MAX_PENDING_TASKS}",
is_error=True
)
# Proceed with cancellation but log the security concern

if pending:
# Cancel all pending tasks
for task in pending:
if not task.done():
task.cancel()

# Give tasks a chance to terminate
# Give tasks a chance to terminate with shorter timeout
try:
# Use a timeout to avoid hanging
wait_task = asyncio.wait(pending, timeout=2.0, return_when=asyncio.ALL_COMPLETED)
# Use a short timeout to avoid hanging (reduced from 2.0s)
wait_task = asyncio.wait(pending, timeout=0.5, return_when=asyncio.ALL_COMPLETED)
loop.run_until_complete(wait_task)
except asyncio.TimeoutError:
# Security: Log force-killed tasks
log(
f"Security Warning: Force-killed {len(pending)} tasks after timeout",
is_error=True
)
except (asyncio.CancelledError, Exception):
pass

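For context, here is a minimal standalone sketch of the bounded-cleanup pattern the diff and the review comment above describe: cap how many pending tasks are tracked, cancel them, then wait with a short timeout so shutdown cannot hang. The helper name cleanup_pending_tasks and the print()-based logging are illustrative assumptions for this sketch, not the project's actual code.

# Minimal sketch of the bounded task-cleanup pattern shown in the diff above.
# cleanup_pending_tasks and the print()-based logging are illustrative stand-ins.
import asyncio

MAX_PENDING_TASKS = 100  # cap on tasks tracked during cleanup (resource-exhaustion guard)


def cleanup_pending_tasks(loop: asyncio.AbstractEventLoop) -> None:
    """Cancel leftover tasks and wait briefly so the loop can be closed cleanly."""
    pending = asyncio.all_tasks(loop)

    if len(pending) > MAX_PENDING_TASKS:
        # Log the anomaly but still proceed with cancellation, as the diff does.
        print(f"Warning: {len(pending)} pending tasks exceeds limit of {MAX_PENDING_TASKS}")

    if not pending:
        return

    for task in pending:
        if not task.done():
            task.cancel()

    # Bounded wait so a misbehaving task cannot hang shutdown indefinitely.
    loop.run_until_complete(
        asyncio.wait(pending, timeout=0.5, return_when=asyncio.ALL_COMPLETED)
    )

Note that asyncio.wait() returns a (done, pending) pair when the timeout elapses rather than raising, so a caller that wants to report force-cancelled tasks can inspect the returned pending set.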
229 changes: 229 additions & 0 deletions test/test_build_runner.py
@@ -0,0 +1,229 @@
"""
Tests for build_runner.py module.

Tests build execution, output capture, exit code handling, and telemetry integration.
"""

import unittest
from pathlib import Path
from unittest.mock import patch, MagicMock

from src.smartfix.domains.workflow.build_runner import run_build_command


class TestBuildRunner(unittest.TestCase):
"""Tests for build command execution."""

def setUp(self):
"""Set up mocks for each test."""
# Mock telemetry to prevent actual telemetry calls
self.telemetry_patcher = patch('src.telemetry_handler.update_telemetry')
self.mock_telemetry = self.telemetry_patcher.start()

# Mock subprocess.run
self.subprocess_patcher = patch('subprocess.run')
self.mock_subprocess = self.subprocess_patcher.start()

def tearDown(self):
"""Clean up mocks after each test."""
self.telemetry_patcher.stop()
self.subprocess_patcher.stop()

def test_build_succeeds_exit_code_0(self):
"""Test successful build with exit code 0."""
# Setup mock
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = "BUILD SUCCESS\n"
mock_result.stderr = ""
self.mock_subprocess.return_value = mock_result

# Execute
success, output = run_build_command(
"mvn clean install",
Path("/tmp/repo"),
"test-remediation-id"
)

# Verify
self.assertTrue(success)
self.assertEqual(output, "BUILD SUCCESS\n")
self.mock_subprocess.assert_called_once()
self.mock_telemetry.assert_called_with("configInfo.buildCommandRunTestsIncluded", True)

def test_build_fails_exit_code_1(self):
"""Test failed build with exit code 1."""
# Setup mock
mock_result = MagicMock()
mock_result.returncode = 1
mock_result.stdout = ""
mock_result.stderr = "COMPILATION FAILED\n"
self.mock_subprocess.return_value = mock_result

# Execute
success, output = run_build_command(
"mvn clean install",
Path("/tmp/repo"),
"test-remediation-id"
)

# Verify
self.assertFalse(success)
self.assertEqual(output, "COMPILATION FAILED\n")
self.mock_telemetry.assert_called_with("configInfo.buildCommandRunTestsIncluded", True)

def test_build_different_exit_codes(self):
"""Test handling of various exit codes (2, 124, 137)."""
test_cases = [
(2, "Error: Invalid arguments\n"),
(124, "Error: Timeout\n"),
(137, "Error: Process killed\n"),
]

for exit_code, error_msg in test_cases:
with self.subTest(exit_code=exit_code):
# Setup mock
mock_result = MagicMock()
mock_result.returncode = exit_code
mock_result.stdout = ""
mock_result.stderr = error_msg
self.mock_subprocess.return_value = mock_result

# Execute
success, output = run_build_command(
"npm test",
Path("/tmp/repo"),
"test-remediation-id"
)

# Verify
self.assertFalse(success)
self.assertEqual(output, error_msg)

def test_build_output_parsing_stdout_and_stderr(self):
"""Test that both stdout and stderr are captured and combined."""
# Setup mock
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = "Standard output\n"
mock_result.stderr = "Warning messages\n"
self.mock_subprocess.return_value = mock_result

# Execute
success, output = run_build_command(
"gradle build",
Path("/tmp/repo"),
"test-remediation-id"
)

# Verify
self.assertTrue(success)
self.assertEqual(output, "Standard output\nWarning messages\n")

def test_build_command_not_found(self):
"""Test handling of FileNotFoundError when command doesn't exist."""
# Setup mock to raise FileNotFoundError
self.mock_subprocess.side_effect = FileNotFoundError("Command not found")

# Mock error_exit to prevent actual exit
with patch('src.smartfix.domains.workflow.build_runner.error_exit') as mock_error_exit:
# Execute
run_build_command(
"nonexistent-command",
Path("/tmp/repo"),
"test-remediation-id"
)

# Verify error_exit was called
mock_error_exit.assert_called_once_with("test-remediation-id")

def test_build_unexpected_exception(self):
"""Test handling of unexpected exceptions during build."""
# Setup mock to raise unexpected exception
self.mock_subprocess.side_effect = RuntimeError("Unexpected error")

# Mock error_exit to prevent actual exit
with patch('src.smartfix.domains.workflow.build_runner.error_exit') as mock_error_exit:
# Execute
run_build_command(
"mvn clean install",
Path("/tmp/repo"),
"test-remediation-id"
)

# Verify error_exit was called
mock_error_exit.assert_called_once_with("test-remediation-id")

def test_build_subprocess_call_parameters(self):
"""Test that subprocess.run is called with correct parameters."""
# Setup mock
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = "Success"
mock_result.stderr = ""
self.mock_subprocess.return_value = mock_result

# Execute
command = "npm run build"
repo_root = Path("/tmp/test-repo")
run_build_command(command, repo_root, "test-remediation-id")

# Verify subprocess.run was called with correct parameters
self.mock_subprocess.assert_called_once_with(
command,
cwd=repo_root,
shell=True,
check=False,
capture_output=True,
text=True,
encoding='utf-8',
errors='replace'
)

def test_build_telemetry_always_recorded(self):
"""Test that telemetry is recorded regardless of build outcome."""
# Test successful build
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = "Success"
mock_result.stderr = ""
self.mock_subprocess.return_value = mock_result

run_build_command("npm test", Path("/tmp/repo"), "test-remediation-id")
self.mock_telemetry.assert_called_with("configInfo.buildCommandRunTestsIncluded", True)

# Reset mock
self.mock_telemetry.reset_mock()

# Test failed build
mock_result.returncode = 1
mock_result.stdout = ""
mock_result.stderr = "Failed"

run_build_command("npm test", Path("/tmp/repo"), "test-remediation-id")
self.mock_telemetry.assert_called_with("configInfo.buildCommandRunTestsIncluded", True)

def test_build_encoding_handling(self):
"""Test that encoding errors are handled gracefully."""
# Setup mock with unicode characters
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = "Test output with unicode: \u2713\n"
mock_result.stderr = "Warning with emoji: \U0001F4A9\n"
self.mock_subprocess.return_value = mock_result

# Execute
success, output = run_build_command(
"npm test",
Path("/tmp/repo"),
"test-remediation-id"
)

# Verify
self.assertTrue(success)
self.assertIn("\u2713", output)
self.assertIn("\U0001F4A9", output)


if __name__ == '__main__':
unittest.main()
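For readers skimming the diff, the following sketch summarizes the contract these tests exercise: run_build_command runs the command via subprocess.run with shell=True, returns a (success, combined stdout + stderr) tuple, records telemetry, and delegates fatal errors to error_exit. It is inferred from the assertions above and is not the project's build_runner implementation; update_telemetry and error_exit are hypothetical stand-ins for the helpers the tests patch.

# Behaviour inferred from the tests above; NOT the project's build_runner module.
# update_telemetry() and error_exit() are hypothetical stand-ins for the patched helpers.
import subprocess
from pathlib import Path
from typing import Tuple


def update_telemetry(key: str, value) -> None:
    """Stand-in for the telemetry helper the tests mock."""


def error_exit(remediation_id: str) -> None:
    """Stand-in for the fatal-error helper the tests mock."""
    raise SystemExit(1)


def run_build_command(command: str, repo_root: Path, remediation_id: str) -> Tuple[bool, str]:
    """Run a build command and return (success, combined stdout + stderr)."""
    update_telemetry("configInfo.buildCommandRunTestsIncluded", True)
    try:
        result = subprocess.run(
            command,
            cwd=repo_root,
            shell=True,
            check=False,
            capture_output=True,
            text=True,
            encoding='utf-8',
            errors='replace',
        )
    except Exception:
        # Covers FileNotFoundError and unexpected errors; both test cases expect error_exit().
        error_exit(remediation_id)
        return False, ""

    # The output-parsing test expects stdout followed by stderr in a single string.
    return result.returncode == 0, result.stdout + result.stderr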