Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 7 additions & 3 deletions autobot-backend/agents/overseer/command_explanation_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@

from autobot_shared.http_client import get_http_client
from autobot_shared.ssot_config import DEFAULT_LLM_MODEL
from constants.api_constants import PATH_OLLAMA_GENERATE
from dependencies import get_config

from .types import CommandBreakdownPart, CommandExplanation, OutputExplanation
Expand Down Expand Up @@ -53,15 +54,18 @@ def _get_ollama_endpoint(self) -> str:
"""Get Ollama endpoint from config."""
try:
endpoint = get_config().get_ollama_url()
if not endpoint.endswith("/api/generate"):
endpoint = endpoint.rstrip("/") + "/api/generate"
if not endpoint.endswith(PATH_OLLAMA_GENERATE):
endpoint = endpoint.rstrip("/") + PATH_OLLAMA_GENERATE
return endpoint
except Exception as e:
logger.error("Failed to get Ollama endpoint: %s", e)
from config import ConfigManager

config = ConfigManager()
return f"http://{config.get_host('ollama')}:{config.get_port('ollama')}/api/generate"
return (
f"http://{config.get_host('ollama')}:{config.get_port('ollama')}"
+ PATH_OLLAMA_GENERATE
)

def _get_model(self) -> str:
"""Get LLM model from config."""
Expand Down
10 changes: 7 additions & 3 deletions autobot-backend/agents/overseer/overseer_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@

from autobot_shared.http_client import get_http_client
from autobot_shared.ssot_config import DEFAULT_LLM_MODEL
from constants.api_constants import PATH_OLLAMA_GENERATE
from dependencies import get_config

from .types import AgentTask, OverseerUpdate, StepResult, StepStatus, TaskPlan
Expand Down Expand Up @@ -112,15 +113,18 @@ def _get_ollama_endpoint(self) -> str:
"""Get Ollama endpoint from config."""
try:
endpoint = get_config().get_ollama_url()
if not endpoint.endswith("/api/generate"):
endpoint = endpoint.rstrip("/") + "/api/generate"
if not endpoint.endswith(PATH_OLLAMA_GENERATE):
endpoint = endpoint.rstrip("/") + PATH_OLLAMA_GENERATE
return endpoint
except Exception as e:
logger.error("Failed to get Ollama endpoint: %s", e)
from config import ConfigManager

config = ConfigManager()
return f"http://{config.get_host('ollama')}:{config.get_port('ollama')}/api/generate"
return (
f"http://{config.get_host('ollama')}:{config.get_port('ollama')}"
+ PATH_OLLAMA_GENERATE
)

def _get_model(self) -> str:
"""Get LLM model from config."""
Expand Down
9 changes: 5 additions & 4 deletions autobot-backend/api/api_benchmarks.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from constants.api_constants import PATH_API_HEALTH
from constants.network_constants import NetworkConstants
from tests.benchmarks.benchmark_base import BenchmarkRunner, assert_performance

Expand Down Expand Up @@ -60,14 +61,14 @@ async def test_health_endpoint_benchmark(self, runner, mock_httpx_client):

async def health_check():
async with httpx.AsyncClient() as client:
response = await client.get(f"{BASE_URL}/api/health")
response = await client.get(f"{BASE_URL}{PATH_API_HEALTH}")
return response.json()

result = await runner.run_async_benchmark(
name="api_health_endpoint",
func=health_check,
iterations=20,
metadata={"endpoint": "/api/health", "method": "GET"},
metadata={"endpoint": PATH_API_HEALTH, "method": "GET"},
)

logger.info("Health Endpoint Benchmark:")
Expand Down Expand Up @@ -162,15 +163,15 @@ async def test_concurrent_requests_benchmark(self, runner, mock_httpx_client):

async def concurrent_health_checks():
async with httpx.AsyncClient() as client:
tasks = [client.get(f"{BASE_URL}/api/health") for _ in range(5)]
tasks = [client.get(f"{BASE_URL}{PATH_API_HEALTH}") for _ in range(5)]
responses = await asyncio.gather(*tasks)
return [r.json() for r in responses]

result = await runner.run_async_benchmark(
name="api_concurrent_requests",
func=concurrent_health_checks,
iterations=10,
metadata={"concurrent_requests": 5, "endpoint": "/api/health"},
metadata={"concurrent_requests": 5, "endpoint": PATH_API_HEALTH},
)

logger.info("Concurrent Requests Benchmark (5 concurrent):")
Expand Down
13 changes: 7 additions & 6 deletions autobot-backend/chat_workflow/llm_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

from async_chat_workflow import WorkflowMessage
from autobot_shared.http_client import get_http_client
from constants.api_constants import PATH_OLLAMA_GENERATE
from constants.model_constants import ModelConstants
from dependencies import get_config
from extensions.base import HookContext
Expand Down Expand Up @@ -151,8 +152,8 @@ def _get_ollama_endpoint(self) -> str:
)
if endpoint and endpoint.startswith(_VALID_URL_SCHEMES): # Issue #380
# Ensure /api/generate path is included
if not endpoint.endswith("/api/generate"):
endpoint = endpoint.rstrip("/") + "/api/generate"
if not endpoint.endswith(PATH_OLLAMA_GENERATE):
endpoint = endpoint.rstrip("/") + PATH_OLLAMA_GENERATE
return endpoint
logger.error(
"Invalid endpoint URL: %s, using config-based default", endpoint
Expand All @@ -171,8 +172,8 @@ def _get_ollama_endpoint_for_model(self, model_name: str) -> str:
try:
base_url = get_config().get_ollama_endpoint_for_model(model_name)
if base_url and base_url.startswith(_VALID_URL_SCHEMES):
if not base_url.endswith("/api/generate"):
base_url = base_url.rstrip("/") + "/api/generate"
if not base_url.endswith(PATH_OLLAMA_GENERATE):
base_url = base_url.rstrip("/") + PATH_OLLAMA_GENERATE
return base_url
except Exception as e:
logger.warning("Model endpoint routing failed: %s", e)
Expand Down Expand Up @@ -374,8 +375,8 @@ async def _prepare_llm_request_params(
# then fall back to local config-based resolution (#1070 model routing).
slm_base = await self._discover_ollama_from_slm()
if slm_base:
if not slm_base.endswith("/api/generate"):
slm_base = slm_base.rstrip("/") + "/api/generate"
if not slm_base.endswith(PATH_OLLAMA_GENERATE):
slm_base = slm_base.rstrip("/") + PATH_OLLAMA_GENERATE
ollama_endpoint = slm_base
else:
ollama_endpoint = self._get_ollama_endpoint_for_model(selected_model)
Expand Down
7 changes: 7 additions & 0 deletions autobot-backend/constants/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,13 @@
from .threshold_constants import (
StringParsingConstants, # Issue #380: Centralized string parsing
)
from .api_constants import ( # Issue #3531: Centralized API path constants
PATH_API_HEALTH,
PATH_HEALTH,
PATH_OLLAMA_CHAT,
PATH_OLLAMA_GENERATE,
PATH_OLLAMA_TAGS,
)
from .threshold_constants import ( # Issue #318: Threshold and timing constants
AgentThresholds,
BatchConfig,
Expand Down
17 changes: 17 additions & 0 deletions autobot-backend/constants/api_constants.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# AutoBot - AI-Powered Automation Platform
# Copyright (c) 2025 mrveiss
# Author: mrveiss
"""Centralised API path and endpoint constants.

Issue #3531: Replace hardcoded path strings across the codebase with
named constants so all endpoint paths are defined in one place.

These are URL *path suffixes* (each begins with a leading slash); callers
append them to a scheme://host:port base, e.g.
``f"{base_url}{PATH_OLLAMA_GENERATE}"``. Keep the leading slash and no
trailing slash so ``base.rstrip("/") + PATH`` composes cleanly.
"""

# Health check endpoints
# PATH_HEALTH: un-prefixed health probe; PATH_API_HEALTH: the same check
# behind the /api prefix. Used by benchmarks and as middleware exemptions
# (audit, service-auth, tracing) so health polling skips auth/tracing.
PATH_HEALTH = "/health"
PATH_API_HEALTH = "/api/health"

# Ollama inference endpoints
# PATH_OLLAMA_GENERATE: single-prompt completion endpoint.
# PATH_OLLAMA_CHAT: chat-style (messages list) completion endpoint.
# PATH_OLLAMA_TAGS: model listing; also used as a cheap availability probe.
PATH_OLLAMA_GENERATE = "/api/generate"
PATH_OLLAMA_CHAT = "/api/chat"
PATH_OLLAMA_TAGS = "/api/tags"
6 changes: 3 additions & 3 deletions autobot-backend/constants/threshold_constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -487,9 +487,9 @@ class ProtocolDefaults:
WSS: str = "wss"
TCP: str = "tcp"

# Health endpoints
HEALTH_ENDPOINT: str = "/health"
API_HEALTH_ENDPOINT: str = "/api/health"
# Health endpoints — imported from api_constants (Issue #3531)
from constants.api_constants import PATH_HEALTH as HEALTH_ENDPOINT # noqa: F401
from constants.api_constants import PATH_API_HEALTH as API_HEALTH_ENDPOINT # noqa: F401

# API version
API_VERSION: str = "1.0"
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

from autobot_shared.http_client import get_http_client
from autobot_shared.ssot_config import get_ollama_url
from constants.api_constants import PATH_OLLAMA_TAGS

from ..models import LLMRequest, LLMResponse, LLMSettings
from ..streaming import StreamingManager
Expand Down Expand Up @@ -70,7 +71,7 @@ async def test_environment(self) -> EnvironmentTestResult:
http_client = get_http_client()
timeout = aiohttp.ClientTimeout(total=5.0)
async with await http_client.get(
f"{ollama_url}/api/tags", timeout=timeout
f"{ollama_url}{PATH_OLLAMA_TAGS}", timeout=timeout
) as resp:
if resp.status == 200:
data = await resp.json()
Expand Down
3 changes: 2 additions & 1 deletion autobot-backend/llm_interface_pkg/providers/ollama.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
from autobot_shared.ssot_config import get_ollama_url
from circuit_breaker import circuit_breaker_async
from config import ConfigManager
from constants.api_constants import PATH_OLLAMA_CHAT

from ..models import LLMRequest, LLMResponse, LLMSettings
from ..streaming import StreamingManager
Expand Down Expand Up @@ -444,7 +445,7 @@ def _prepare_chat_request(self, request: LLMRequest) -> tuple:
Tuple of (url, headers, model, use_streaming, data, span_attrs)
"""
self.ollama_host = self.get_host_from_env()
url = f"{self.ollama_host}/api/chat"
url = f"{self.ollama_host}{PATH_OLLAMA_CHAT}"
headers = {"Content-Type": "application/json"}

model = request.model_name or self.settings.default_model
Expand Down
7 changes: 4 additions & 3 deletions autobot-backend/llm_providers/ollama_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@

from autobot_shared.http_client import get_http_client
from autobot_shared.ssot_config import get_ollama_url
from constants.api_constants import PATH_OLLAMA_CHAT, PATH_OLLAMA_TAGS
from llm_interface_pkg.models import LLMRequest, LLMResponse
from llm_interface_pkg.types import ProviderType

Expand Down Expand Up @@ -129,7 +130,7 @@ async def stream_completion(self, request: LLMRequest) -> AsyncIterator[str]:
http_client = get_http_client()
timeout = aiohttp.ClientTimeout(total=None, connect=5.0, sock_read=None)
async with await http_client.post(
f"{base_url}/api/chat",
f"{base_url}{PATH_OLLAMA_CHAT}",
headers={"Content-Type": "application/json"},
json=payload,
timeout=timeout,
Expand Down Expand Up @@ -160,7 +161,7 @@ async def is_available(self) -> bool:
http_client = get_http_client()
timeout = aiohttp.ClientTimeout(total=5.0)
async with await http_client.get(
f"{self._resolve_base_url()}/api/tags",
f"{self._resolve_base_url()}{PATH_OLLAMA_TAGS}",
timeout=timeout,
) as resp:
return resp.status == 200
Expand All @@ -173,7 +174,7 @@ async def list_models(self) -> List[str]:
http_client = get_http_client()
timeout = aiohttp.ClientTimeout(total=10.0)
async with await http_client.get(
f"{self._resolve_base_url()}/api/tags",
f"{self._resolve_base_url()}{PATH_OLLAMA_TAGS}",
timeout=timeout,
) as resp:
if resp.status == 200:
Expand Down
3 changes: 2 additions & 1 deletion autobot-backend/middleware/audit_middleware.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ async def my_endpoint(request: Request):
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.types import ASGIApp

from constants.api_constants import PATH_API_HEALTH
from middleware.proxy_utils import get_client_ip
from services.audit_logger import AuditResult, get_audit_logger

Expand Down Expand Up @@ -135,7 +136,7 @@ def __init__(
self.exclude_paths = exclude_paths or [
"/docs",
"/openapi.json",
"/api/health",
PATH_API_HEALTH,
"/api/metrics",
"/static",
]
Expand Down
5 changes: 3 additions & 2 deletions autobot-backend/middleware/service_auth_enforcement.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
from fastapi import HTTPException, Request
from fastapi.responses import JSONResponse

from constants.api_constants import PATH_API_HEALTH, PATH_HEALTH
from security.service_auth import validate_service_auth

logger = structlog.get_logger()
Expand All @@ -32,8 +33,8 @@
# Endpoints that DO NOT require service authentication (frontend-accessible)
EXEMPT_PATHS: List[str] = [
# Health and version endpoints
"/health", # Health check (no /api prefix)
"/api/health", # API health check
PATH_HEALTH, # Health check (no /api prefix)
PATH_API_HEALTH, # API health check
"/api/version", # Version information
# User-facing chat and conversation endpoints
"/api/chat",
Expand Down
6 changes: 4 additions & 2 deletions autobot-backend/middleware/service_auth_logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@
from fastapi import Request
from starlette.middleware.base import BaseHTTPMiddleware

from constants.api_constants import PATH_API_HEALTH, PATH_HEALTH

from security.service_auth import validate_service_auth

logger = structlog.get_logger()
Expand Down Expand Up @@ -41,8 +43,8 @@ def _is_exempt_path(self, request: Request) -> bool:
Return True if the request path matches a service-auth exemption.
"""
skip_paths = [
"/health", # Health check (no prefix)
"/api/health", # General health check
PATH_HEALTH, # Health check (no prefix)
PATH_API_HEALTH, # General health check
"/api/version", # Version info
"/docs", # API documentation
"/openapi.json", # OpenAPI spec
Expand Down
5 changes: 3 additions & 2 deletions autobot-backend/middleware/tracing_middleware.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
from starlette.requests import Request
from starlette.responses import Response

from constants.api_constants import PATH_API_HEALTH, PATH_HEALTH
from middleware.proxy_utils import get_client_ip
from services.tracing_service import get_tracing_service

Expand All @@ -45,8 +46,8 @@ class TracingMiddleware(BaseHTTPMiddleware):

# Paths to exclude from detailed tracing (health checks, metrics, etc.)
EXCLUDED_PATHS = {
"/health",
"/api/health",
PATH_HEALTH,
PATH_API_HEALTH,
"/metrics",
"/api/metrics",
"/favicon.ico",
Expand Down
3 changes: 2 additions & 1 deletion autobot-backend/utils/connection_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from autobot_shared.redis_client import get_redis_client
from autobot_shared.ssot_config import get_ollama_url
from config import config as global_config_manager
from constants.api_constants import PATH_OLLAMA_GENERATE, PATH_OLLAMA_TAGS
from constants.model_constants import ModelConstants
from constants.network_constants import NetworkConstants
from type_defs.common import Metadata
Expand Down Expand Up @@ -206,7 +207,7 @@ async def test_ollama_connection() -> Metadata:
endpoint, model
)

check_url = endpoint.replace("/api/generate", "/api/tags")
check_url = endpoint.replace(PATH_OLLAMA_GENERATE, PATH_OLLAMA_TAGS)
timeout = aiohttp.ClientTimeout(total=10)

async with aiohttp.ClientSession(timeout=timeout) as session:
Expand Down
3 changes: 2 additions & 1 deletion autobot-backend/utils/hardware_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
import psutil

# Import existing monitoring infrastructure
from constants.api_constants import PATH_API_HEALTH

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -684,7 +685,7 @@ def _get_service_configurations(self) -> List[Dict[str, Any]]:
"name": "Backend API",
"host": ssot_config.vm.main,
"port": 8001,
"path": "/api/health",
"path": PATH_API_HEALTH,
},
{
"name": "Frontend",
Expand Down
Loading
Loading