diff --git a/.dockerignore b/.dockerignore
index 790ff9ed4c..8092340583 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -5,9 +5,10 @@
# Large / generated data
memory/**
-# Logs & tmp
+# Logs, tmp, usr
logs/*
tmp/*
+usr/*
# Knowledge directory β keep only default/
knowledge/**
diff --git a/.gitignore b/.gitignore
index 17d47e1c82..c33c0598cf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,10 +2,12 @@
**/.DS_Store
**/.env
**/__pycache__/
+*.py[cod]
**/.conda/
-#Ignore cursor rules
+#Ignore IDE files
.cursor/
+.windsurf/
# ignore test files in root dir
/*.test.py
@@ -20,8 +22,9 @@ memory/**
# Handle logs directory
logs/*
-# Handle tmp directory
+# Handle tmp and usr directory
tmp/*
+usr/*
# Handle knowledge directory
knowledge/**
@@ -39,4 +42,7 @@ instruments/**
# Global rule to include .gitkeep files anywhere
!**/.gitkeep
-agent_history.gif
\ No newline at end of file
+
+# for browser-use
+agent_history.gif
+
diff --git a/README.md b/README.md
index b0d5c765ff..d2c0d9b4bc 100644
--- a/README.md
+++ b/README.md
@@ -29,14 +29,8 @@ Or see DeepWiki generated documentation:
-> ### π¨ **IMPORTANT ANNOUNCEMENT** π¨
-
-The original GitHub and DockerHub repositories for Agent Zero have been transferred to a new namespace:
-
-- **GitHub & DockerHub:** `agent0ai/agent-zero`
-
-From now on, please use this name for both `git clone` and `docker pull` commands.
-
+> ### 🚨 **PROJECTS!** 🚨
+Agent Zero now supports **Projects** — isolated workspaces with their own prompts, files, memory, and secrets, so you can create dedicated setups for each use case without mixing contexts.
@@ -87,6 +81,7 @@ From now on, please use this name for both `git clone` and `docker pull` command
- The framework does not guide or limit the agent in any way. There are no hard-coded rails that agents have to follow.
- Every prompt, every small message template sent to the agent in its communication loop can be found in the **prompts/** folder and changed.
- Every default tool can be found in the **python/tools/** folder and changed or copied to create new predefined tools.
+- **Automated configuration** via `A0_SET_` environment variables for deployment automation and easy setup.

@@ -172,6 +167,26 @@ docker run -p 50001:80 agent0ai/agent-zero
## π― Changelog
+### v0.9.7 - Projects
+[Release video](https://youtu.be/RrTDp_v9V1c)
+- Projects management
+ - Support for custom instructions
+ - Integration with memory, knowledge, files
+ - Project specific secrets
+- New Welcome screen/Dashboard
+- New Wait tool
+- Subordinate agent configuration override support
+- Support for multiple documents at once in document_query_tool
+- Improved context on interventions
+- Openrouter embedding support
+- Frontend components refactor and polishing
+- SSH metadata output fix
+- Support for windows powershell in local TTY utility
+- More efficient selective streaming for LLMs
+- UI output length limit improvements
+
+
+
### v0.9.6 - Memory Dashboard
[Release video](https://youtu.be/sizjAq2-d9s)
- Memory Management Dashboard
diff --git a/agent.py b/agent.py
index cb767a5a04..50d47e4c3b 100644
--- a/agent.py
+++ b/agent.py
@@ -8,11 +8,18 @@
from datetime import datetime, timezone
from typing import Any, Awaitable, Coroutine, Dict, Literal
from enum import Enum
-import uuid
import models
-from python.helpers import extract_tools, files, errors, history, tokens
-from python.helpers import dirty_json
+from python.helpers import (
+ extract_tools,
+ files,
+ errors,
+ history,
+ tokens,
+ context as context_helper,
+ dirty_json,
+ subagents
+)
from python.helpers.print_style import PrintStyle
from langchain_core.prompts import (
@@ -53,13 +60,26 @@ def __init__(
created_at: datetime | None = None,
type: AgentContextType = AgentContextType.USER,
last_message: datetime | None = None,
+ data: dict | None = None,
+ output_data: dict | None = None,
+ set_current: bool = False,
):
- # build context
+ # initialize context
self.id = id or AgentContext.generate_id()
+ existing = self._contexts.get(self.id, None)
+ if existing:
+ AgentContext.remove(self.id)
+ self._contexts[self.id] = self
+ if set_current:
+ AgentContext.set_current(self.id)
+
+ # initialize state
self.name = name
self.config = config
+ self.data = data or {}
+ self.output_data = output_data or {}
self.log = log or Log.Log()
- self.agent0 = agent0 or Agent(0, self.config, self)
+ self.log.context = self
self.paused = paused
self.streaming_agent = streaming_agent
self.task: DeferredTask | None = None
@@ -67,18 +87,35 @@ def __init__(
self.type = type
AgentContext._counter += 1
self.no = AgentContext._counter
- # set to start of unix epoch
self.last_message = last_message or datetime.now(timezone.utc)
- existing = self._contexts.get(self.id, None)
- if existing:
- AgentContext.remove(self.id)
- self._contexts[self.id] = self
+ # initialize agent at last (context is complete now)
+ self.agent0 = agent0 or Agent(0, self.config, self)
@staticmethod
def get(id: str):
return AgentContext._contexts.get(id, None)
+ @staticmethod
+ def use(id: str):
+ context = AgentContext.get(id)
+ if context:
+ AgentContext.set_current(id)
+ else:
+ AgentContext.set_current("")
+ return context
+
+ @staticmethod
+ def current():
+ ctxid = context_helper.get_context_data("agent_context_id", "")
+ if not ctxid:
+ return None
+ return AgentContext.get(ctxid)
+
+ @staticmethod
+ def set_current(ctxid: str):
+ context_helper.set_context_data("agent_context_id", ctxid)
+
@staticmethod
def first():
if not AgentContext._contexts:
@@ -92,7 +129,8 @@ def all():
@staticmethod
def generate_id():
def generate_short_id():
- return ''.join(random.choices(string.ascii_letters + string.digits, k=8))
+ return "".join(random.choices(string.ascii_letters + string.digits, k=8))
+
while True:
short_id = generate_short_id()
if short_id not in AgentContext._contexts:
@@ -102,6 +140,7 @@ def generate_short_id():
def get_notification_manager(cls):
if cls._notification_manager is None:
from python.helpers.notification import NotificationManager # type: ignore
+
cls._notification_manager = NotificationManager()
return cls._notification_manager
@@ -112,7 +151,23 @@ def remove(id: str):
context.task.kill()
return context
- def serialize(self):
+ def get_data(self, key: str, recursive: bool = True):
+ # recursive is not used now, prepared for context hierarchy
+ return self.data.get(key, None)
+
+ def set_data(self, key: str, value: Any, recursive: bool = True):
+ # recursive is not used now, prepared for context hierarchy
+ self.data[key] = value
+
+ def get_output_data(self, key: str, recursive: bool = True):
+ # recursive is not used now, prepared for context hierarchy
+ return self.output_data.get(key, None)
+
+ def set_output_data(self, key: str, value: Any, recursive: bool = True):
+ # recursive is not used now, prepared for context hierarchy
+ self.output_data[key] = value
+
+ def output(self):
return {
"id": self.id,
"name": self.name,
@@ -132,6 +187,7 @@ def serialize(self):
else Localization.get().serialize_datetime(datetime.fromtimestamp(0))
),
"type": self.type.value,
+ **self.output_data,
}
@staticmethod
@@ -222,7 +278,6 @@ async def _process_chain(self, agent: "Agent", msg: "UserMessage|str", user=True
agent.handle_critical_exception(e)
-
@dataclass
class AgentConfig:
chat_model: models.ModelConfig
@@ -233,7 +288,9 @@ class AgentConfig:
profile: str = ""
memory_subdir: str = ""
knowledge_subdirs: list[str] = field(default_factory=lambda: ["default", "custom"])
- browser_http_headers: dict[str, str] = field(default_factory=dict) # Custom HTTP headers for browser requests
+ browser_http_headers: dict[str, str] = field(
+ default_factory=dict
+ ) # Custom HTTP headers for browser requests
code_exec_ssh_enabled: bool = True
code_exec_ssh_addr: str = "localhost"
code_exec_ssh_port: int = 55022
@@ -260,6 +317,7 @@ def __init__(self, **kwargs):
self.last_response = ""
self.params_temporary: dict = {}
self.params_persistent: dict = {}
+ self.current_tool = None
# override values with kwargs
for key, value in kwargs.items():
@@ -306,6 +364,7 @@ def __init__(
asyncio.run(self.call_extensions("agent_init"))
async def monologue(self):
+ error_retries = 0 # counter for critical error retries
while True:
try:
# loop data dictionary to pass to extensions
@@ -332,7 +391,9 @@ async def monologue(self):
prompt = await self.prepare_prompt(loop_data=self.loop_data)
# call before_main_llm_call extensions
- await self.call_extensions("before_main_llm_call", loop_data=self.loop_data)
+ await self.call_extensions(
+ "before_main_llm_call", loop_data=self.loop_data
+ )
async def reasoning_callback(chunk: str, full: str):
await self.handle_intervention()
@@ -341,7 +402,9 @@ async def reasoning_callback(chunk: str, full: str):
# Pass chunk and full data to extensions for processing
stream_data = {"chunk": chunk, "full": full}
await self.call_extensions(
- "reasoning_stream_chunk", loop_data=self.loop_data, stream_data=stream_data
+ "reasoning_stream_chunk",
+ loop_data=self.loop_data,
+ stream_data=stream_data,
)
# Stream masked chunk after extensions processed it
if stream_data.get("chunk"):
@@ -357,7 +420,9 @@ async def stream_callback(chunk: str, full: str):
# Pass chunk and full data to extensions for processing
stream_data = {"chunk": chunk, "full": full}
await self.call_extensions(
- "response_stream_chunk", loop_data=self.loop_data, stream_data=stream_data
+ "response_stream_chunk",
+ loop_data=self.loop_data,
+ stream_data=stream_data,
)
# Stream masked chunk after extensions processed it
if stream_data.get("chunk"):
@@ -405,6 +470,7 @@ async def stream_callback(chunk: str, full: str):
# exceptions inside message loop:
except InterventionException as e:
+ error_retries = 0 # reset retry counter on user intervention
pass # intervention message has been handled in handle_intervention(), proceed with conversation loop
except RepairableException as e:
# Forward repairable errors to the LLM, maybe it can fix them
@@ -414,8 +480,10 @@ async def stream_callback(chunk: str, full: str):
PrintStyle(font_color="red", padding=True).print(msg["message"])
self.context.log.log(type="error", content=msg["message"])
except Exception as e:
- # Other exception kill the loop
- self.handle_critical_exception(e)
+ # Retry critical exceptions before failing
+ error_retries = await self.retry_critical_exception(
+ e, error_retries
+ )
finally:
# call message_loop_end extensions
@@ -425,9 +493,13 @@ async def stream_callback(chunk: str, full: str):
# exceptions outside message loop:
except InterventionException as e:
+ error_retries = 0 # reset retry counter on user intervention
pass # just start over
except Exception as e:
- self.handle_critical_exception(e)
+ # Retry critical exceptions before failing
+ error_retries = await self.retry_critical_exception(
+ e, error_retries
+ )
finally:
self.context.streaming_agent = None # unset current streamer
# call monologue_end extensions
@@ -484,6 +556,30 @@ async def prepare_prompt(self, loop_data: LoopData) -> list[BaseMessage]:
return full_prompt
+ async def retry_critical_exception(
+ self, e: Exception, error_retries: int, delay: int = 3, max_retries: int = 1
+ ) -> int:
+ if error_retries >= max_retries:
+ self.handle_critical_exception(e)
+
+ error_message = errors.format_error(e)
+
+ self.context.log.log(
+ type="warning", content="Critical error occurred, retrying..."
+ )
+ PrintStyle(font_color="orange", padding=True).print(
+ "Critical error occurred, retrying..."
+ )
+ await asyncio.sleep(delay)
+ agent_facing_error = self.read_prompt(
+ "fw.msg_critical_error.md", error_message=error_message
+ )
+ self.hist_add_warning(message=agent_facing_error)
+ PrintStyle(font_color="orange", padding=True).print(
+ agent_facing_error
+ )
+ return error_retries + 1
+
def handle_critical_exception(self, exception: Exception):
if isinstance(exception, HandledException):
raise exception # Re-raise the exception to kill the loop
@@ -522,28 +618,17 @@ async def get_system_prompt(self, loop_data: LoopData) -> list[str]:
return system_prompt
def parse_prompt(self, _prompt_file: str, **kwargs):
- dirs = [files.get_abs_path("prompts")]
- if (
- self.config.profile
- ): # if agent has custom folder, use it and use default as backup
- prompt_dir = files.get_abs_path("agents", self.config.profile, "prompts")
- dirs.insert(0, prompt_dir)
+ dirs = subagents.get_paths(self, "prompts")
prompt = files.parse_file(
- _prompt_file, _directories=dirs, **kwargs
+ _prompt_file, _directories=dirs, _agent=self, **kwargs
)
return prompt
def read_prompt(self, file: str, **kwargs) -> str:
- dirs = [files.get_abs_path("prompts")]
- if (
- self.config.profile
- ): # if agent has custom folder, use it and use default as backup
- prompt_dir = files.get_abs_path("agents", self.config.profile, "prompts")
- dirs.insert(0, prompt_dir)
- prompt = files.read_prompt_file(
- file, _directories=dirs, **kwargs
- )
- prompt = files.remove_code_fences(prompt)
+ dirs = subagents.get_paths(self, "prompts")
+ prompt = files.read_prompt_file(file, _directories=dirs, _agent=self, **kwargs)
+ if files.is_full_json_template(prompt):
+ prompt = files.remove_code_fences(prompt)
return prompt
def get_data(self, field: str):
@@ -558,8 +643,12 @@ def hist_add_message(
self.last_message = datetime.now(timezone.utc)
# Allow extensions to process content before adding to history
content_data = {"content": content}
- asyncio.run(self.call_extensions("hist_add_before", content_data=content_data, ai=ai))
- return self.history.add_message(ai=ai, content=content_data["content"], tokens=tokens)
+ asyncio.run(
+ self.call_extensions("hist_add_before", content_data=content_data, ai=ai)
+ )
+ return self.history.add_message(
+ ai=ai, content=content_data["content"], tokens=tokens
+ )
def hist_add_user_message(self, message: UserMessage, intervention: bool = False):
self.history.new_topic() # user message starts a new topic in history
@@ -671,8 +760,10 @@ async def stream_callback(chunk: str, total: str):
response, _reasoning = await call_data["model"].unified_call(
system_message=call_data["system"],
user_message=call_data["message"],
- response_callback=stream_callback,
- rate_limiter_callback=self.rate_limiter_callback if not call_data["background"] else None,
+ response_callback=stream_callback if call_data["callback"] else None,
+ rate_limiter_callback=(
+ self.rate_limiter_callback if not call_data["background"] else None
+ ),
)
return response
@@ -694,7 +785,9 @@ async def call_chat_model(
messages=messages,
reasoning_callback=reasoning_callback,
response_callback=response_callback,
- rate_limiter_callback=self.rate_limiter_callback if not background else None,
+ rate_limiter_callback=(
+ self.rate_limiter_callback if not background else None
+ ),
)
return response, reasoning
@@ -714,6 +807,13 @@ async def handle_intervention(self, progress: str = ""):
): # if there is an intervention message, but not yet processed
msg = self.intervention
self.intervention = None # reset the intervention message
+ # If a tool was running, save its progress to history
+ last_tool = self.loop_data.current_tool
+ if last_tool:
+ tool_progress = last_tool.progress.strip()
+ if tool_progress:
+ self.hist_add_tool_result(last_tool.name, tool_progress)
+ last_tool.set_progress(None)
if progress.strip():
self.hist_add_ai_response(progress)
# append the intervention message
@@ -762,31 +862,44 @@ async def process_tools(self, msg: str):
# Fallback to local get_tool if MCP tool was not found or MCP lookup failed
if not tool:
tool = self.get_tool(
- name=tool_name, method=tool_method, args=tool_args, message=msg, loop_data=self.loop_data
+ name=tool_name,
+ method=tool_method,
+ args=tool_args,
+ message=msg,
+ loop_data=self.loop_data,
)
if tool:
- await self.handle_intervention()
+ self.loop_data.current_tool = tool # type: ignore
+ try:
+ await self.handle_intervention()
+ # Call tool hooks for compatibility
+ await tool.before_execution(**tool_args)
+ await self.handle_intervention()
- # Call tool hooks for compatibility
- await tool.before_execution(**tool_args)
- await self.handle_intervention()
+ # Allow extensions to preprocess tool arguments
+ await self.call_extensions(
+ "tool_execute_before",
+ tool_args=tool_args or {},
+ tool_name=tool_name,
+ )
- # Allow extensions to preprocess tool arguments
- await self.call_extensions("tool_execute_before", tool_args=tool_args or {}, tool_name=tool_name)
+ response = await tool.execute(**tool_args)
+ await self.handle_intervention()
- response = await tool.execute(**tool_args)
- await self.handle_intervention()
+ # Allow extensions to postprocess tool response
+ await self.call_extensions(
+ "tool_execute_after", response=response, tool_name=tool_name
+ )
- # Allow extensions to postprocess tool response
- await self.call_extensions("tool_execute_after", response=response, tool_name=tool_name)
-
- await tool.after_execution(response)
- await self.handle_intervention()
+ await tool.after_execution(response)
+ await self.handle_intervention()
- if response.break_loop:
- return response.message
+ if response.break_loop:
+ return response.message
+ finally:
+ self.loop_data.current_tool = None
else:
error_detail = (
f"Tool '{raw_tool_name}' not found or could not be initialized."
@@ -831,34 +944,40 @@ async def handle_response_stream(self, stream: str):
pass
def get_tool(
- self, name: str, method: str | None, args: dict, message: str, loop_data: LoopData | None, **kwargs
+ self,
+ name: str,
+ method: str | None,
+ args: dict,
+ message: str,
+ loop_data: LoopData | None,
+ **kwargs,
):
from python.tools.unknown import Unknown
from python.helpers.tool import Tool
classes = []
- # try agent tools first
- if self.config.profile:
+ # search for tools in agent's folder hierarchy
+ paths = subagents.get_paths(self, "tools", name + ".py", default_root="python")
+ for path in paths:
try:
- classes = extract_tools.load_classes_from_file(
- "agents/" + self.config.profile + "/tools/" + name + ".py", Tool # type: ignore[arg-type]
- )
+ classes = extract_tools.load_classes_from_file(path, Tool) # type: ignore[arg-type]
+ break
except Exception:
- pass
+ continue
- # try default tools
- if not classes:
- try:
- classes = extract_tools.load_classes_from_file(
- "python/tools/" + name + ".py", Tool # type: ignore[arg-type]
- )
- except Exception as e:
- pass
tool_class = classes[0] if classes else Unknown
return tool_class(
- agent=self, name=name, method=method, args=args, message=message, loop_data=loop_data, **kwargs
+ agent=self,
+ name=name,
+ method=method,
+ args=args,
+ message=message,
+ loop_data=loop_data,
+ **kwargs,
)
async def call_extensions(self, extension_point: str, **kwargs) -> Any:
- return await call_extensions(extension_point=extension_point, agent=self, **kwargs)
+ return await call_extensions(
+ extension_point=extension_point, agent=self, **kwargs
+ )
diff --git a/agents/agent0/agent.json b/agents/agent0/agent.json
new file mode 100644
index 0000000000..4fa2cb2c12
--- /dev/null
+++ b/agents/agent0/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Agent 0",
+ "description": "Main agent of the system communicating directly with the user.",
+ "context": ""
+}
diff --git a/agents/default/agent.json b/agents/default/agent.json
new file mode 100644
index 0000000000..846d2a679f
--- /dev/null
+++ b/agents/default/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Default prompts",
+  "description": "Default prompt file templates. Should be inherited and overridden by specialized prompt profiles.",
+ "context": ""
+}
diff --git a/agents/developer/agent.json b/agents/developer/agent.json
new file mode 100644
index 0000000000..8680176e36
--- /dev/null
+++ b/agents/developer/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Developer",
+ "description": "Agent specialized in complex software development.",
+ "context": "Use this agent for software development tasks, including writing code, debugging, refactoring, and architectural design."
+}
diff --git a/agents/hacker/agent.json b/agents/hacker/agent.json
new file mode 100644
index 0000000000..cde645d798
--- /dev/null
+++ b/agents/hacker/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Hacker",
+ "description": "Agent specialized in cyber security and penetration testing.",
+ "context": "Use this agent for cybersecurity tasks such as penetration testing, vulnerability analysis, and security auditing."
+}
diff --git a/agents/researcher/agent.json b/agents/researcher/agent.json
new file mode 100644
index 0000000000..e06a9639b5
--- /dev/null
+++ b/agents/researcher/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Researcher",
+ "description": "Agent specialized in research, data analysis and reporting.",
+ "context": "Use this agent for information gathering, data analysis, topic research, and generating comprehensive reports."
+}
diff --git a/conf/model_providers.yaml b/conf/model_providers.yaml
index 0683cf0119..930c229965 100644
--- a/conf/model_providers.yaml
+++ b/conf/model_providers.yaml
@@ -27,6 +27,9 @@ chat:
anthropic:
name: Anthropic
litellm_provider: anthropic
+ cometapi:
+ name: CometAPI
+ litellm_provider: cometapi
deepseek:
name: DeepSeek
litellm_provider: deepseek
@@ -35,8 +38,9 @@ chat:
litellm_provider: github_copilot
kwargs:
extra_headers:
- "Editor-Version": "vscode/1.85.1"
- "Copilot-Integration-Id": "vscode-chat"
+ "Editor-Version": "vscode/1.85.1"
+ "Copilot-Integration-Id": "vscode-chat"
+ "Copilot-Vision-Request": "true"
google:
name: Google
litellm_provider: gemini
@@ -61,6 +65,9 @@ chat:
azure:
name: OpenAI Azure
litellm_provider: azure
+ bedrock:
+ name: AWS Bedrock
+ litellm_provider: bedrock
openrouter:
name: OpenRouter
litellm_provider: openrouter
@@ -81,6 +88,16 @@ chat:
xai:
name: xAI
litellm_provider: xai
+ zai:
+ name: Z.AI
+ litellm_provider: openai
+ kwargs:
+ api_base: https://api.z.ai/api/paas/v4
+ zai_coding:
+ name: Z.AI Coding
+ litellm_provider: openai
+ kwargs:
+ api_base: https://api.z.ai/api/coding/paas/v4
other:
name: Other OpenAI compatible
litellm_provider: openai
@@ -107,6 +124,18 @@ embedding:
azure:
name: OpenAI Azure
litellm_provider: azure
+ bedrock:
+ name: AWS Bedrock
+ litellm_provider: bedrock
+ # TODO: OpenRouter not yet supported by LiteLLM, replace with native litellm_provider openrouter and remove api_base when ready
+ openrouter:
+ name: OpenRouter
+ litellm_provider: openai
+ kwargs:
+ api_base: https://openrouter.ai/api/v1
+ extra_headers:
+ "HTTP-Referer": "https://agent-zero.ai/"
+ "X-Title": "Agent Zero"
other:
name: Other OpenAI compatible
- litellm_provider: openai
\ No newline at end of file
+ litellm_provider: openai
diff --git a/conf/projects.default.gitignore b/conf/projects.default.gitignore
new file mode 100644
index 0000000000..9a5f01f2ae
--- /dev/null
+++ b/conf/projects.default.gitignore
@@ -0,0 +1,13 @@
+# A0 project meta folder
+.a0proj/
+
+# Python environments & cache
+venv/
+**/__pycache__/
+
+# Node.js dependencies
+**/node_modules/
+**/.npm/
+
+# Version control metadata
+**/.git/
diff --git a/docker/base/fs/ins/install_python.sh b/docker/base/fs/ins/install_python.sh
index 82f2fc3832..417395ebcc 100644
--- a/docker/base/fs/ins/install_python.sh
+++ b/docker/base/fs/ins/install_python.sh
@@ -20,7 +20,7 @@ python3.13 -m venv /opt/venv
source /opt/venv/bin/activate
# upgrade pip and install static packages
-pip install --no-cache-dir --upgrade pip ipython requests
+pip install --no-cache-dir --upgrade pip pipx ipython requests
echo "====================PYTHON PYVENV===================="
diff --git a/docker/run/fs/ins/install_A0.sh b/docker/run/fs/ins/install_A0.sh
index 7b5d0d8073..0aeaf13ff8 100644
--- a/docker/run/fs/ins/install_A0.sh
+++ b/docker/run/fs/ins/install_A0.sh
@@ -36,6 +36,8 @@ fi
# Install remaining A0 python packages
uv pip install -r /git/agent-zero/requirements.txt
+# override for packages that have unnecessarily strict dependencies
+uv pip install -r /git/agent-zero/requirements2.txt
# install playwright
bash /ins/install_playwright.sh "$@"
diff --git a/docs/connectivity.md b/docs/connectivity.md
index 8cfbe250ec..a6f465eaba 100644
--- a/docs/connectivity.md
+++ b/docs/connectivity.md
@@ -25,6 +25,7 @@ Send messages to Agent Zero and receive responses. Supports text messages, file
* `message` (string, required): The message to send
* `attachments` (array, optional): Array of `{filename, base64}` objects
* `lifetime_hours` (number, optional): Chat lifetime in hours (default: 24)
+* `project` (string, optional): Project name to activate (only on first message)
**Headers:**
* `X-API-KEY` (required)
@@ -169,6 +170,63 @@ async function sendWithAttachment() {
sendWithAttachment();
```
+#### Project Usage Example
+
+```javascript
+// Working with projects
+async function sendMessageWithProject() {
+ try {
+ // First message - activate project
+ const response = await fetch('YOUR_AGENT_ZERO_URL/api_message', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-API-KEY': 'YOUR_API_KEY'
+ },
+ body: JSON.stringify({
+ message: "Analyze the project structure",
+ project: "my-web-app" // Activates this project
+ })
+ });
+
+ const data = await response.json();
+
+ if (response.ok) {
+      console.log('✅ Project activated!');
+ console.log('Context ID:', data.context_id);
+ console.log('Response:', data.response);
+
+ // Continue conversation - project already set
+ const followUp = await fetch('YOUR_AGENT_ZERO_URL/api_message', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-API-KEY': 'YOUR_API_KEY'
+ },
+ body: JSON.stringify({
+ context_id: data.context_id,
+ message: "What files are in the project?"
+ // Do NOT include project field here - already set on first message
+ })
+ });
+
+ const followUpData = await followUp.json();
+ console.log('Follow-up response:', followUpData.response);
+ return followUpData;
+ } else {
+      console.error('❌ Error:', data.error);
+ return null;
+ }
+ } catch (error) {
+    console.error('❌ Request failed:', error);
+ return null;
+ }
+}
+
+// Call the function
+sendMessageWithProject();
+```
+
---
## `GET/POST /api_log_get`
@@ -568,6 +626,30 @@ Below is an example of a `mcp.json` configuration file that a client could use t
}
```
+### Project Support in MCP
+
+You can specify a project for MCP connections by including it in the URL path:
+
+```json
+{
+ "mcpServers": {
+ "agent-zero-with-project": {
+ "type": "sse",
+ "url": "YOUR_AGENT_ZERO_URL/mcp/t-YOUR_API_TOKEN/p-my-project-name/sse"
+ },
+ "agent-zero-http-with-project": {
+ "type": "streamable-http",
+ "url": "YOUR_AGENT_ZERO_URL/mcp/t-YOUR_API_TOKEN/p-my-project-name/http/"
+ }
+ }
+}
+```
+
+When a project is specified in the URL:
+- All new chats will be created within that project context
+- The agent will have access to project-specific instructions, knowledge, and file structure
+- Attempting to use an existing chat_id from a different project will result in an error
+
---
## A2A (Agent-to-Agent) Connectivity
@@ -583,3 +665,14 @@ To connect another agent to your Agent Zero instance, use the following URL form
```
YOUR_AGENT_ZERO_URL/a2a/t-YOUR_API_TOKEN
```
+
+To connect with a specific project active:
+
+```
+YOUR_AGENT_ZERO_URL/a2a/t-YOUR_API_TOKEN/p-PROJECT_NAME
+```
+
+When a project is specified:
+- All A2A conversations will run in the context of that project
+- The agent will have access to project-specific resources, instructions, and knowledge
+- This enables project-isolated agent-to-agent communication
diff --git a/docs/development.md b/docs/development.md
index 54fe39580e..9faa1805bd 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -149,6 +149,20 @@ You're now ready to contribute to Agent Zero, create custom extensions, or modif
- See [extensibility](extensibility.md) for instructions on how to create custom extensions.
- See [contribution](contribution.md) for instructions on how to contribute to the framework.
+## Configuration via Environment Variables
+
+For development and testing, you can override default settings using the `.env` file with `A0_SET_` prefixed variables:
+
+```env
+# Add to your .env file
+A0_SET_chat_model_provider=ollama
+A0_SET_chat_model_name=llama3.2
+A0_SET_chat_model_api_base=http://localhost:11434
+A0_SET_memory_recall_interval=5
+```
+
+These environment variables automatically override the hardcoded defaults in `get_default_settings()` without modifying code. Useful for testing different configurations or multi-environment setups.
+
## Want to build your docker image?
- You can use the `DockerfileLocal` to build your docker image.
- Navigate to your project root in the terminal and run `docker build -f DockerfileLocal -t agent-zero-local --build-arg CACHE_DATE=$(date +%Y-%m-%d:%H:%M:%S) .`
diff --git a/docs/extensibility.md b/docs/extensibility.md
index bf70d38975..db10a2a887 100644
--- a/docs/extensibility.md
+++ b/docs/extensibility.md
@@ -213,6 +213,11 @@ Agent Zero supports creating specialized subagents with customized behavior. The
- `/agents/{agent_profile}/extensions/` - for custom extensions
- `/agents/{agent_profile}/tools/` - for custom tools
- `/agents/{agent_profile}/prompts/` - for custom prompts
+ - `/agents/{agent_profile}/settings.json` - for agent-specific configuration overrides
+
+The `settings.json` file for an agent uses the same structure as `tmp/settings.json`, but you only need to specify the fields you want to override. Any field omitted from the agent-specific `settings.json` will continue to use the global value.
+
+This allows power users to, for example, change the AI model, context window size, or other settings for a single agent without affecting the rest of the system.
### Example Subagent Structure
@@ -223,15 +228,71 @@ Agent Zero supports creating specialized subagents with customized behavior. The
β βββ _10_example_extension.py
βββ prompts/
β βββ ...
-βββ tools/
- βββ example_tool.py
- βββ response.py
+βββ tools/
+β βββ example_tool.py
+β βββ response.py
+βββ settings.json
```
In this example:
- `_10_example_extension.py` is an extension that renames the agent when initialized
- `response.py` overrides the default response tool with custom behavior
- `example_tool.py` is a new tool specific to this agent
+- `settings.json` overrides any global settings for this specific agent (only for the fields defined in this file)
+
+## Projects
+
+Projects provide isolated workspaces for individual chats, keeping prompts, memory, knowledge, files, and secrets scoped to a specific use case.
+
+### Project Location and Structure
+
+- Projects are located under `/a0/usr/projects/`
+- Each project has its own subdirectory, created by users via the UI
+- A project can be backed up or restored by copying or downloading its entire directory
+
+Each project directory contains a hidden `.a0proj` folder with project metadata and configuration:
+
+```
+/a0/usr/projects/{project_name}/
+βββ .a0proj/
+ βββ project.json # project metadata and settings
+ βββ instructions/ # additional prompt/instruction files
+ βββ knowledge/ # files to be imported into memory
+ βββ memory/ # project-specific memory storage
+ βββ secrets.env # sensitive variables (secrets)
+ βββ variables.env # non-sensitive variables
+```
+
+### Behavior When a Project Is Active in a Chat
+
+When a project is activated for a chat:
+
+- The agent is instructed to work **inside the project directory**
+- Project prompts (instructions) from `.a0proj/instructions/` are **automatically injected** into the context window (all text files are imported)
+- Memory can be configured as **project-specific**, meaning:
+ - It does not mix with global memory
+ - The memory file is stored under `.a0proj/memory/`
+- Files created or modified by the agent are located within the project directory
+
+The `.a0proj/knowledge/` folder contains files that are imported into the project’s memory, enabling project-focused knowledge bases.
+
+### Secrets and Variables
+
+Each project manages its own configuration values via environment files in `.a0proj/`:
+
+- `secrets.env` – **sensitive variables**, such as API keys or passwords
+- `variables.env` – **non-sensitive variables**, such as configuration flags or identifiers
+
+These files allow you to keep credentials and configuration tightly scoped to a single project.
+
+### When to Use Projects
+
+Projects are the recommended way to create specialized workflows in Agent Zero when you need to:
+
+- Add specific instructions without affecting global behavior
+- Isolate file context, knowledge, and memory for a particular task or client
+- Keep passwords and other secrets scoped to a single workspace
+- Run multiple independent flows side by side under the same Agent Zero installation
## Best Practices
- Keep extensions focused on a single responsibility
diff --git a/docs/installation.md b/docs/installation.md
index b8688f0919..c611b1b798 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -93,9 +93,55 @@ The following user guide provides instructions for installing and running Agent
- `/tmp/settings.json` - Your Agent Zero settings
> [!TIP]
-> Choose a location that's easy to access and backup. All your Agent Zero data
+> Choose a location that's easy to access and backup. All your Agent Zero data
> will be directly accessible in this directory.
+### Automated Configuration via Environment Variables
+
+Agent Zero settings can be automatically configured using environment variables with the `A0_SET_` prefix in your `.env` file. This enables automated deployments without manual configuration.
+
+**Usage:**
+Add variables to your `.env` file in the format:
+```
+A0_SET_{setting_name}={value}
+```
+
+**Examples:**
+```env
+# Model configuration
+A0_SET_chat_model_provider=anthropic
+A0_SET_chat_model_name=claude-3-5-sonnet-20241022
+A0_SET_chat_model_ctx_length=200000
+
+# Memory settings
+A0_SET_memory_recall_enabled=true
+A0_SET_memory_recall_interval=5
+
+# Agent configuration
+A0_SET_agent_profile=custom
+A0_SET_agent_memory_subdir=production
+```
+
+**Docker usage:**
+When running Docker, you can pass these as environment variables:
+```bash
+docker run -p 50080:80 \
+ -e A0_SET_chat_model_provider=anthropic \
+ -e A0_SET_chat_model_name=claude-3-5-sonnet-20241022 \
+ agent0ai/agent-zero
+```
+
+**Type conversion:**
+- Strings are used as-is
+- Numbers are automatically converted (e.g., "100000" becomes integer 100000)
+- Booleans accept: true/false, 1/0, yes/no, on/off (case-insensitive)
+- Dictionaries must be valid JSON (e.g., `{"temperature": "0"}`)
+
+**Notes:**
+- These provide initial default values when settings.json doesn't exist or when new settings are added to the application. Once a value is saved in settings.json, it takes precedence over these environment variables.
+- Sensitive settings (API keys, passwords) use their existing environment variables
+- Container/process restart required for changes to take effect
+
2.3. Run the container:
- In Docker Desktop, go back to the "Images" tab
- Click the `Run` button next to the `agent0ai/agent-zero` image
diff --git a/docs/res/banner_high.png b/docs/res/banner_high.png
new file mode 100644
index 0000000000..69e4155628
Binary files /dev/null and b/docs/res/banner_high.png differ
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 97ebd58fa8..487ec524d2 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -12,7 +12,7 @@ This page addresses frequently asked questions (FAQ) and provides troubleshootin
Refer to the [Choosing your LLMs](installation.md#installing-and-using-ollama-local-models) section of the documentation for detailed instructions and examples for configuring different LLMs. Local models can be run using Ollama or LM Studio.
> [!TIP]
-> Some LLM providers offer free usage of their APIs, for example Groq, Mistral or SambaNova.
+> Some LLM providers offer free usage of their APIs, for example Groq, Mistral, SambaNova or CometAPI.
**6. How can I make Agent Zero retain memory between sessions?**
Refer to the [How to update Agent Zero](installation.md#how-to-update-agent-zero) section of the documentation for instructions on how to update Agent Zero while retaining memory and data.
diff --git a/initialize.py b/initialize.py
index 3c42c952e5..ec26227fa9 100644
--- a/initialize.py
+++ b/initialize.py
@@ -4,8 +4,10 @@
from python.helpers.print_style import PrintStyle
-def initialize_agent():
+def initialize_agent(override_settings: dict | None = None):
current_settings = settings.get_settings()
+ if override_settings:
+ current_settings = settings.merge_settings(current_settings, override_settings)
def _normalize_model_kwargs(kwargs: dict) -> dict:
# convert string values that represent valid Python numbers to numeric types
diff --git a/models.py b/models.py
index 12c36afb49..4676352c29 100644
--- a/models.py
+++ b/models.py
@@ -22,7 +22,7 @@
from python.helpers import dotenv
from python.helpers import settings, dirty_json
from python.helpers.dotenv import load_dotenv
-from python.helpers.providers import get_provider_config
+from python.helpers.providers import ModelType as ProviderModelType, get_provider_config
from python.helpers.rate_limiter import RateLimiter
from python.helpers.tokens import approximate_tokens
from python.helpers import dirty_json, browser_use_monkeypatch
@@ -41,6 +41,7 @@
)
from langchain.embeddings.base import Embeddings
from sentence_transformers import SentenceTransformer
+from pydantic import ConfigDict
# disable extra logging, must be done repeatedly, otherwise browser-use will turn it back on for some reason
@@ -106,17 +107,17 @@ def __init__(self, chunk: ChatChunk|None = None):
def add_chunk(self, chunk: ChatChunk) -> ChatChunk:
if chunk["reasoning_delta"]:
self.native_reasoning = True
-
+
# if native reasoning detection works, there's no need to worry about thinking tags
if self.native_reasoning:
processed_chunk = ChatChunk(response_delta=chunk["response_delta"], reasoning_delta=chunk["reasoning_delta"])
else:
# if the model outputs thinking tags, we ned to parse them manually as reasoning
processed_chunk = self._process_thinking_chunk(chunk)
-
+
self.reasoning += processed_chunk["reasoning_delta"]
self.response += processed_chunk["response_delta"]
-
+
return processed_chunk
def _process_thinking_chunk(self, chunk: ChatChunk) -> ChatChunk:
@@ -145,7 +146,7 @@ def _process_thinking_tags(self, response: str, reasoning: str) -> ChatChunk:
response = response[len(opening_tag):]
self.thinking = True
self.thinking_tag = closing_tag
-
+
close_pos = response.find(closing_tag)
if close_pos != -1:
reasoning += response[:close_pos]
@@ -164,7 +165,7 @@ def _process_thinking_tags(self, response: str, reasoning: str) -> ChatChunk:
self.unprocessed = response
response = ""
break
-
+
return ChatChunk(response_delta=response, reasoning_delta=reasoning)
def _is_partial_opening_tag(self, text: str, opening_tag: str) -> bool:
@@ -191,7 +192,7 @@ def output(self) -> ChatChunk:
else:
response += self.unprocessed
return ChatChunk(response_delta=response, reasoning_delta=reasoning)
-
+
rate_limiters: dict[str, RateLimiter] = {}
api_keys_round_robin: dict[str, int] = {}
@@ -293,10 +294,11 @@ class LiteLLMChatWrapper(SimpleChatModel):
provider: str
kwargs: dict = {}
- class Config:
- arbitrary_types_allowed = True
- extra = "allow" # Allow extra attributes
- validate_assignment = False # Don't validate on assignment
+ model_config = ConfigDict(
+ arbitrary_types_allowed=True,
+ extra="allow",
+ validate_assignment=False,
+ )
def __init__(
self,
@@ -487,6 +489,7 @@ async def unified_call(
call_kwargs: dict[str, Any] = {**self.kwargs, **kwargs}
max_retries: int = int(call_kwargs.pop("a0_retry_attempts", 2))
retry_delay_s: float = float(call_kwargs.pop("a0_retry_delay_seconds", 1.5))
+ stream = reasoning_callback is not None or response_callback is not None or tokens_callback is not None
# results
result = ChatGenerationResult()
@@ -499,48 +502,59 @@ async def unified_call(
_completion = await acompletion(
model=self.model_name,
messages=msgs_conv,
- stream=True,
+ stream=stream,
**call_kwargs,
)
- # iterate over chunks
- async for chunk in _completion: # type: ignore
- got_any_chunk = True
- # parse chunk
- parsed = _parse_chunk(chunk)
+ if stream:
+ # iterate over chunks
+ async for chunk in _completion: # type: ignore
+ got_any_chunk = True
+ # parse chunk
+ parsed = _parse_chunk(chunk)
+ output = result.add_chunk(parsed)
+
+ # collect reasoning delta and call callbacks
+ if output["reasoning_delta"]:
+ if reasoning_callback:
+ await reasoning_callback(output["reasoning_delta"], result.reasoning)
+ if tokens_callback:
+ await tokens_callback(
+ output["reasoning_delta"],
+ approximate_tokens(output["reasoning_delta"]),
+ )
+ # Add output tokens to rate limiter if configured
+ if limiter:
+ limiter.add(output=approximate_tokens(output["reasoning_delta"]))
+ # collect response delta and call callbacks
+ if output["response_delta"]:
+ if response_callback:
+ await response_callback(output["response_delta"], result.response)
+ if tokens_callback:
+ await tokens_callback(
+ output["response_delta"],
+ approximate_tokens(output["response_delta"]),
+ )
+ # Add output tokens to rate limiter if configured
+ if limiter:
+ limiter.add(output=approximate_tokens(output["response_delta"]))
+
+ # non-stream response
+ else:
+ parsed = _parse_chunk(_completion)
output = result.add_chunk(parsed)
-
- # collect reasoning delta and call callbacks
- if output["reasoning_delta"]:
- if reasoning_callback:
- await reasoning_callback(output["reasoning_delta"], result.reasoning)
- if tokens_callback:
- await tokens_callback(
- output["reasoning_delta"],
- approximate_tokens(output["reasoning_delta"]),
- )
- # Add output tokens to rate limiter if configured
- if limiter:
- limiter.add(output=approximate_tokens(output["reasoning_delta"]))
- # collect response delta and call callbacks
- if output["response_delta"]:
- if response_callback:
- await response_callback(output["response_delta"], result.response)
- if tokens_callback:
- await tokens_callback(
- output["response_delta"],
- approximate_tokens(output["response_delta"]),
- )
- # Add output tokens to rate limiter if configured
- if limiter:
+ if limiter:
+ if output["response_delta"]:
limiter.add(output=approximate_tokens(output["response_delta"]))
+ if output["reasoning_delta"]:
+ limiter.add(output=approximate_tokens(output["reasoning_delta"]))
# Successful completion of stream
return result.response, result.reasoning
except Exception as e:
import asyncio
-
+
# Retry only if no chunks received and error is transient
if got_any_chunk or not _is_transient_litellm_error(e) or attempt >= max_retries:
raise
@@ -799,12 +813,16 @@ def _parse_chunk(chunk: Any) -> ChatChunk:
message.get("content", "")
if isinstance(message, dict)
else getattr(message, "content", "")
- )
+ ) or ""
reasoning_delta = (
delta.get("reasoning_content", "")
if isinstance(delta, dict)
else getattr(delta, "reasoning_content", "")
- )
+ ) or (
+ message.get("reasoning_content", "")
+ if isinstance(message, dict)
+ else getattr(message, "reasoning_content", "")
+ ) or ""
return ChatChunk(reasoning_delta=reasoning_delta, response_delta=response_delta)
@@ -826,7 +844,7 @@ def _adjust_call_args(provider_name: str, model_name: str, kwargs: dict):
def _merge_provider_defaults(
- provider_type: str, original_provider: str, kwargs: dict
+ provider_type: ProviderModelType, original_provider: str, kwargs: dict
) -> tuple[str, dict]:
# Normalize .env-style numeric strings (e.g., "timeout=30") into ints/floats for LiteLLM
def _normalize_values(values: dict) -> dict:
diff --git a/prompts/agent.extras.project.file_structure.md b/prompts/agent.extras.project.file_structure.md
new file mode 100644
index 0000000000..5167003f94
--- /dev/null
+++ b/prompts/agent.extras.project.file_structure.md
@@ -0,0 +1,9 @@
+# File structure of project {{project_name}}
+- this is filtered overview not full scan
+- list yourself if needed
+- maximum depth: {{max_depth}}
+- ignored:
+{{gitignore}}
+
+## file tree
+{{file_structure}}
\ No newline at end of file
diff --git a/prompts/agent.system.main.tips.md b/prompts/agent.system.main.tips.md
index d1818c1a8a..1940d912f0 100644
--- a/prompts/agent.system.main.tips.md
+++ b/prompts/agent.system.main.tips.md
@@ -7,7 +7,7 @@ never assume success
memory refers memory tools not own knowledge
## Files
-save files in /root
+when not in project save files in /root
don't use spaces in file names
## Instruments
diff --git a/prompts/agent.system.projects.active.md b/prompts/agent.system.projects.active.md
new file mode 100644
index 0000000000..3d2ae63a91
--- /dev/null
+++ b/prompts/agent.system.projects.active.md
@@ -0,0 +1,12 @@
+## Active project
+Path: {{project_path}}
+Title: {{project_name}}
+Description: {{project_description}}
+
+
+### Important project instructions MUST follow
+- always work inside {{project_path}} directory
+- do not rename project directory do not change meta files in .a0proj folder
+- cleanup when code accidentally creates files outside move them
+
+{{project_instructions}}
\ No newline at end of file
diff --git a/prompts/agent.system.projects.inactive.md b/prompts/agent.system.projects.inactive.md
new file mode 100644
index 0000000000..5cdd943900
--- /dev/null
+++ b/prompts/agent.system.projects.inactive.md
@@ -0,0 +1 @@
+no project currently activated
\ No newline at end of file
diff --git a/prompts/agent.system.projects.main.md b/prompts/agent.system.projects.main.md
new file mode 100644
index 0000000000..1b6890de60
--- /dev/null
+++ b/prompts/agent.system.projects.main.md
@@ -0,0 +1,5 @@
+# Projects
+- user can create and activate projects
+- projects have work folder in /usr/projects/ and instructions and config in /usr/projects/<project>/.a0proj
+- when activated agent works in project follows project instructions
+- agent cannot manipulate or switch projects
\ No newline at end of file
diff --git a/prompts/agent.system.tool.call_sub.md b/prompts/agent.system.tool.call_sub.md
index b2e267f932..c5c22dc75d 100644
--- a/prompts/agent.system.tool.call_sub.md
+++ b/prompts/agent.system.tool.call_sub.md
@@ -1,3 +1,4 @@
+{{if agent_profiles}}
### call_subordinate
you can use subordinates for subtasks
@@ -31,4 +32,5 @@ example usage
- you might be part of long chain of subordinates, avoid slow and expensive rewriting subordinate responses, instead use `Β§Β§include()` alias to include the response as is
**available profiles:**
-{{agent_profiles}}
\ No newline at end of file
+{{agent_profiles}}
+{{endif}}
\ No newline at end of file
diff --git a/prompts/agent.system.tool.call_sub.py b/prompts/agent.system.tool.call_sub.py
index e840cca60e..946bd7f321 100644
--- a/prompts/agent.system.tool.call_sub.py
+++ b/prompts/agent.system.tool.call_sub.py
@@ -1,31 +1,34 @@
import json
-from typing import Any
+from typing import Any, TYPE_CHECKING
from python.helpers.files import VariablesPlugin
-from python.helpers import files
+from python.helpers import files, projects, subagents
from python.helpers.print_style import PrintStyle
+if TYPE_CHECKING:
+ from agent import Agent
-class CallSubordinate(VariablesPlugin):
- def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]:
- # collect all prompt profiles from subdirectories (_context.md file)
- profiles = []
- agent_subdirs = files.get_subdirectories("agents", exclude=["_example"])
- for agent_subdir in agent_subdirs:
- try:
- context = files.read_prompt_file(
- "_context.md",
- [files.get_abs_path("agents", agent_subdir)]
- )
- profiles.append({"name": agent_subdir, "context": context})
- except Exception as e:
- PrintStyle().error(f"Error loading agent profile '{agent_subdir}': {e}")
+class CallSubordinate(VariablesPlugin):
+ def get_variables(
+ self, file: str, backup_dirs: list[str] | None = None, **kwargs
+ ) -> dict[str, Any]:
- # in case of no profiles
- if not profiles:
- # PrintStyle().error("No agent profiles found")
- profiles = [
- {"name": "default", "context": "Default Agent-Zero AI Assistant"}
- ]
+ # current agent instance
+ agent: Agent | None = kwargs.get("_agent", None)
+ # current project
+ project = projects.get_context_project_name(agent.context) if agent else None
+ # available agents in project (or global)
+ agents = subagents.get_available_agents_dict(project)
- return {"agent_profiles": profiles}
+ if agents:
+ profiles = {}
+ for name, subagent in agents.items():
+ profiles[name] = {
+ "title": subagent.title,
+ "description": subagent.description,
+ "context": subagent.context,
+ }
+ return {"agent_profiles": profiles}
+ else:
+ return {"agent_profiles": None}
+
diff --git a/prompts/agent.system.tool.document_query.md b/prompts/agent.system.tool.document_query.md
index e6ca444326..9cbc51db30 100644
--- a/prompts/agent.system.tool.document_query.md
+++ b/prompts/agent.system.tool.document_query.md
@@ -1,60 +1,62 @@
-### document_query:
-This tool can be used to read or analyze remote and local documents.
-It can be used to:
- * Get webpage or remote document text content
- * Get local document text content
- * Answer queries about a webpage, remote or local document
-By default, when the "queries" argument is empty, this tool returns the text content of the document retrieved using OCR.
-Additionally, you can pass a list of "queries" - in this case, the tool returns the answers to all the passed queries about the document.
-!!! This is a universal document reader qnd query tool
-!!! Supported document formats: HTML, PDF, Office Documents (word,excel, powerpoint), Textfiles and many more.
+### document_query
+read and analyze remote/local documents get text content or answer questions
+pass a single url/path or a list for multiple documents in "document"
+for web documents use "http://" or "https://" prefix
+for local files "file://" prefix is optional but full path is required
+if "queries" is empty tool returns document content
+if "queries" is a list of strings tool returns answers
+supports various formats HTML PDF Office Text etc
+usage:
-#### Arguments:
- * "document" (string) : The web address or local path to the document in question. Webdocuments need "http://" or "https://" protocol prefix. For local files the "file:" protocol prefix is optional. Local files MUST be passed with full filesystem path.
- * "queries" (Optional, list[str]) : Optionally, here you can pass one or more queries to be answered (using and/or about) the document
-
-#### Usage example 1:
-##### Request:
-```json
+1 get content
+~~~json
{
"thoughts": [
- "...",
+ "I need to read..."
],
- "headline": "Reading web document content",
+ "headline": "...",
"tool_name": "document_query",
"tool_args": {
- "document": "https://...somexample",
+ "document": "https://.../document"
}
}
-```
-##### Response:
-```plaintext
-... Here is the entire content of the web document requested ...
-```
+~~~
-#### Usage example 2:
-##### Request:
-```json
+2 query document
+~~~json
{
"thoughts": [
- "...",
+ "I need to answer..."
],
- "headline": "Analyzing document to answer specific questions",
+ "headline": "...",
"tool_name": "document_query",
"tool_args": {
- "document": "https://...somexample",
+ "document": "https://.../document",
"queries": [
- "What is the topic?",
- "Who is the audience?"
+ "What is...",
+ "Who is..."
]
}
}
-```
-##### Response:
-```plaintext
-# What is the topic?
-... Description of the document topic ...
+~~~
-# Who is the audience?
-... The intended document audience list with short descriptions ...
-```
+3 query multiple documents
+~~~json
+{
+ "thoughts": [
+ "I need to compare..."
+ ],
+ "headline": "...",
+ "tool_name": "document_query",
+ "tool_args": {
+ "document": [
+ "https://.../document-one",
+ "file:///path/to/document-two"
+ ],
+ "queries": [
+ "Compare the main conclusions...",
+ "What are the key differences..."
+ ]
+ }
+}
+~~~
diff --git a/prompts/agent.system.tool.wait.md b/prompts/agent.system.tool.wait.md
new file mode 100644
index 0000000000..e8a30b0965
--- /dev/null
+++ b/prompts/agent.system.tool.wait.md
@@ -0,0 +1,34 @@
+### wait
+pause execution for a set time or until a timestamp
+use args "seconds" "minutes" "hours" "days" for duration
+use "until" with ISO timestamp for a specific time
+usage:
+
+1 wait duration
+~~~json
+{
+ "thoughts": [
+ "I need to wait..."
+ ],
+ "headline": "...",
+ "tool_name": "wait",
+ "tool_args": {
+ "minutes": 1,
+ "seconds": 30
+ }
+}
+~~~
+
+2 wait timestamp
+~~~json
+{
+ "thoughts": [
+ "I will wait until..."
+ ],
+ "headline": "...",
+ "tool_name": "wait",
+ "tool_args": {
+ "until": "2025-10-20T10:00:00Z"
+ }
+}
+~~~
diff --git a/prompts/agent.system.tools.py b/prompts/agent.system.tools.py
index bfbe150779..f94544b304 100644
--- a/prompts/agent.system.tools.py
+++ b/prompts/agent.system.tools.py
@@ -5,8 +5,8 @@
from python.helpers.print_style import PrintStyle
-class CallSubordinate(VariablesPlugin):
- def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]:
+class BuidToolsPrompt(VariablesPlugin):
+ def get_variables(self, file: str, backup_dirs: list[str] | None = None, **kwargs) -> dict[str, Any]:
# collect all prompt folders in order of their priority
folder = files.get_abs_path(os.path.dirname(file))
@@ -22,7 +22,7 @@ def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict
tools = []
for prompt_file in prompt_files:
try:
- tool = files.read_prompt_file(prompt_file)
+ tool = files.read_prompt_file(prompt_file, **kwargs)
tools.append(tool)
except Exception as e:
PrintStyle().error(f"Error loading tool '{prompt_file}': {e}")
diff --git a/prompts/fw.code.running.md b/prompts/fw.code.running.md
new file mode 100644
index 0000000000..94a8e93ec6
--- /dev/null
+++ b/prompts/fw.code.running.md
@@ -0,0 +1 @@
+Terminal session {{session}} is still running. Decide to wait for more 'output', 'reset', or use another session number based on situation.
\ No newline at end of file
diff --git a/prompts/fw.msg_critical_error.md b/prompts/fw.msg_critical_error.md
new file mode 100644
index 0000000000..0bdeda139e
--- /dev/null
+++ b/prompts/fw.msg_critical_error.md
@@ -0,0 +1 @@
+This error has occurred: {{error_message}}. Proceed with your original task if possible.
\ No newline at end of file
diff --git a/prompts/fw.wait_complete.md b/prompts/fw.wait_complete.md
new file mode 100644
index 0000000000..3b6d6124de
--- /dev/null
+++ b/prompts/fw.wait_complete.md
@@ -0,0 +1 @@
+Wait complete. Reached {{target_time}}.
\ No newline at end of file
diff --git a/python/api/api_log_get.py b/python/api/api_log_get.py
index c09fdfdc0a..8111dbea5c 100644
--- a/python/api/api_log_get.py
+++ b/python/api/api_log_get.py
@@ -32,7 +32,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
return Response('{"error": "context_id is required"}', status=400, mimetype="application/json")
# Get context
- context = AgentContext.get(context_id)
+ context = AgentContext.use(context_id)
if not context:
return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
diff --git a/python/api/api_message.py b/python/api/api_message.py
index 4d3248c60a..c25cb53694 100644
--- a/python/api/api_message.py
+++ b/python/api/api_message.py
@@ -3,8 +3,9 @@
from datetime import datetime, timedelta
from agent import AgentContext, UserMessage, AgentContextType
from python.helpers.api import ApiHandler, Request, Response
-from python.helpers import files
+from python.helpers import files, projects
from python.helpers.print_style import PrintStyle
+from python.helpers.projects import activate_project
from werkzeug.utils import secure_filename
from initialize import initialize_agent
import threading
@@ -33,6 +34,13 @@ async def process(self, input: dict, request: Request) -> dict | Response:
message = input.get("message", "")
attachments = input.get("attachments", [])
lifetime_hours = input.get("lifetime_hours", 24) # Default 24 hours
+ project_name = input.get("project_name", None)
+ agent_profile = input.get("agent_profile", None)
+
+ # Set an agent if profile provided
+ override_settings = {}
+ if agent_profile:
+ override_settings["agent_profile"] = agent_profile
if not message:
return Response('{"error": "Message is required"}', status=400, mimetype="application/json")
@@ -68,13 +76,44 @@ async def process(self, input: dict, request: Request) -> dict | Response:
# Get or create context
if context_id:
- context = AgentContext.get(context_id)
+ context = AgentContext.use(context_id)
if not context:
return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
+
+ # Validation: if agent profile is provided, it must match the existing one
+ if agent_profile and context.agent0.config.profile != agent_profile:
+ return Response('{"error": "Cannot override agent profile on existing context"}', status=400, mimetype="application/json")
+
+
+ # Validation: if project is provided but context already has different project
+ existing_project = context.get_data(projects.CONTEXT_DATA_KEY_PROJECT)
+ if project_name and existing_project and existing_project != project_name:
+ return Response('{"error": "Project can only be set on first message"}', status=400, mimetype="application/json")
else:
- config = initialize_agent()
+ config = initialize_agent(override_settings=override_settings)
context = AgentContext(config=config, type=AgentContextType.USER)
+ AgentContext.use(context.id)
context_id = context.id
+ # Activate project if provided
+ if project_name:
+ try:
+ activate_project(context_id, project_name)
+ except Exception as e:
+ # Handle project or context errors more gracefully
+ error_msg = str(e)
+ PrintStyle.error(f"Failed to activate project '{project_name}' for context '{context_id}': {error_msg}")
+ return Response(
+ f'{{"error": "Failed to activate project \\"{project_name}\\""}}',
+ status=500,
+ mimetype="application/json",
+ )
+
+ # Activate project if provided
+ if project_name:
+ try:
+ projects.activate_project(context_id, project_name)
+ except Exception as e:
+ return Response(f'{{"error": "Failed to activate project: {str(e)}"}}', status=400, mimetype="application/json")
# Update chat lifetime
with self._cleanup_lock:
diff --git a/python/api/api_reset_chat.py b/python/api/api_reset_chat.py
index b497adb1e0..bf0a10f8a3 100644
--- a/python/api/api_reset_chat.py
+++ b/python/api/api_reset_chat.py
@@ -35,7 +35,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
)
# Check if context exists
- context = AgentContext.get(context_id)
+ context = AgentContext.use(context_id)
if not context:
return Response(
'{"error": "Chat context not found"}',
diff --git a/python/api/api_terminate_chat.py b/python/api/api_terminate_chat.py
index 3d6bcb4b4c..e746d84c5f 100644
--- a/python/api/api_terminate_chat.py
+++ b/python/api/api_terminate_chat.py
@@ -35,7 +35,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
)
# Check if context exists
- context = AgentContext.get(context_id)
+ context = AgentContext.use(context_id)
if not context:
return Response(
'{"error": "Chat context not found"}',
diff --git a/python/api/banners.py b/python/api/banners.py
new file mode 100644
index 0000000000..88b171b8e2
--- /dev/null
+++ b/python/api/banners.py
@@ -0,0 +1,22 @@
+from python.helpers.api import ApiHandler, Request, Response
+from python.helpers.extension import call_extensions
+
+
+class GetBanners(ApiHandler):
+ """
+ API endpoint for Welcome Screen banners.
+ Add checks as extension scripts in python/extensions/banners/ or usr/extensions/banners/
+ """
+
+ async def process(self, input: dict, request: Request) -> dict | Response:
+ banners = input.get("banners", [])
+ frontend_context = input.get("context", {})
+
+ # Banners array passed by reference - extensions append directly to it
+ await call_extensions("banners", agent=None, banners=banners, frontend_context=frontend_context)
+
+ return {"banners": banners}
+
+ @classmethod
+ def get_methods(cls) -> list[str]:
+ return ["POST"]
diff --git a/python/api/chat_create.py b/python/api/chat_create.py
new file mode 100644
index 0000000000..f73f3416d8
--- /dev/null
+++ b/python/api/chat_create.py
@@ -0,0 +1,32 @@
+from python.helpers.api import ApiHandler, Input, Output, Request, Response
+
+
+from python.helpers import projects, guids
+from agent import AgentContext
+
+
+class CreateChat(ApiHandler):
+ async def process(self, input: Input, request: Request) -> Output:
+ current_ctxid = input.get("current_context", "") # current context id
+ new_ctxid = input.get("new_context", guids.generate_id()) # given or new guid
+
+ # context instance - get or create
+ current_context = AgentContext.get(current_ctxid)
+
+ # get/create new context
+ new_context = self.use_context(new_ctxid)
+
+ # copy selected data from current to new context
+ if current_context:
+ current_data_1 = current_context.get_data(projects.CONTEXT_DATA_KEY_PROJECT)
+ if current_data_1:
+ new_context.set_data(projects.CONTEXT_DATA_KEY_PROJECT, current_data_1)
+ current_data_2 = current_context.get_output_data(projects.CONTEXT_DATA_KEY_PROJECT)
+ if current_data_2:
+ new_context.set_output_data(projects.CONTEXT_DATA_KEY_PROJECT, current_data_2)
+
+ return {
+ "ok": True,
+ "ctxid": new_context.id,
+ "message": "Context created.",
+ }
diff --git a/python/api/chat_export.py b/python/api/chat_export.py
index 2817a4ea85..a82be6483e 100644
--- a/python/api/chat_export.py
+++ b/python/api/chat_export.py
@@ -8,7 +8,7 @@ async def process(self, input: Input, request: Request) -> Output:
if not ctxid:
raise Exception("No context id provided")
- context = self.get_context(ctxid)
+ context = self.use_context(ctxid)
content = persist_chat.export_json_chat(context)
return {
"message": "Chats exported.",
diff --git a/python/api/chat_files_path_get.py b/python/api/chat_files_path_get.py
new file mode 100644
index 0000000000..b230d31f73
--- /dev/null
+++ b/python/api/chat_files_path_get.py
@@ -0,0 +1,23 @@
+from python.helpers.api import ApiHandler, Request, Response
+from python.helpers import files, memory, notification, projects, notification, runtime
+import os
+from werkzeug.utils import secure_filename
+
+
+class GetChatFilesPath(ApiHandler):
+ async def process(self, input: dict, request: Request) -> dict | Response:
+ ctxid = input.get("ctxid", "")
+ if not ctxid:
+ raise Exception("No context id provided")
+ context = self.use_context(ctxid)
+
+ project_name = projects.get_context_project_name(context)
+ if project_name:
+ folder = files.normalize_a0_path(projects.get_project_folder(project_name))
+ else:
+ folder = "/root" # root in container
+
+ return {
+ "ok": True,
+ "path": folder,
+ }
\ No newline at end of file
diff --git a/python/api/chat_remove.py b/python/api/chat_remove.py
index a0b186e140..671e43d9ea 100644
--- a/python/api/chat_remove.py
+++ b/python/api/chat_remove.py
@@ -8,7 +8,10 @@ class RemoveChat(ApiHandler):
async def process(self, input: Input, request: Request) -> Output:
ctxid = input.get("context", "")
- context = AgentContext.get(ctxid)
+ scheduler = TaskScheduler.get()
+ scheduler.cancel_tasks_by_context(ctxid, terminate_thread=True)
+
+ context = AgentContext.use(ctxid)
if context:
# stop processing any tasks
context.reset()
@@ -16,7 +19,6 @@ async def process(self, input: Input, request: Request) -> Output:
AgentContext.remove(ctxid)
persist_chat.remove_chat(ctxid)
- scheduler = TaskScheduler.get()
await scheduler.reload()
tasks = scheduler.get_tasks_by_context_id(ctxid)
diff --git a/python/api/chat_reset.py b/python/api/chat_reset.py
index 5086aacc1a..668b08e268 100644
--- a/python/api/chat_reset.py
+++ b/python/api/chat_reset.py
@@ -2,14 +2,18 @@
from python.helpers import persist_chat
+from python.helpers.task_scheduler import TaskScheduler
class Reset(ApiHandler):
async def process(self, input: Input, request: Request) -> Output:
ctxid = input.get("context", "")
+ # attempt to stop any scheduler tasks bound to this context
+ TaskScheduler.get().cancel_tasks_by_context(ctxid, terminate_thread=True)
+
# context instance - get or create
- context = self.get_context(ctxid)
+ context = self.use_context(ctxid)
context.reset()
persist_chat.save_tmp_chat(context)
persist_chat.remove_msg_files(ctxid)
diff --git a/python/api/csrf_token.py b/python/api/csrf_token.py
index bd8615546c..f4d1d63c0f 100644
--- a/python/api/csrf_token.py
+++ b/python/api/csrf_token.py
@@ -1,4 +1,5 @@
import secrets
+from urllib.parse import urlparse
from python.helpers.api import (
ApiHandler,
Input,
@@ -7,7 +8,11 @@
Response,
session,
)
-from python.helpers import runtime
+from python.helpers import runtime, dotenv, login
+import fnmatch
+
+ALLOWED_ORIGINS_KEY = "ALLOWED_ORIGINS"
+
class GetCsrfToken(ApiHandler):
@@ -20,6 +25,124 @@ def requires_csrf(cls) -> bool:
return False
async def process(self, input: Input, request: Request) -> Output:
+
+ # check for allowed origin to prevent dns rebinding attacks
+ origin_check = await self.check_allowed_origin(request)
+ if not origin_check["ok"]:
+ return {
+ "ok": False,
+ "error": f"Origin {self.get_origin_from_request(request)} not allowed when login is disabled. Set login and password or add your URL to ALLOWED_ORIGINS env variable. Currently allowed origins: {','.join(origin_check['allowed_origins'])}",
+ }
+
+ # generate a csrf token if it doesn't exist
if "csrf_token" not in session:
session["csrf_token"] = secrets.token_urlsafe(32)
- return {"token": session["csrf_token"], "runtime_id": runtime.get_runtime_id()}
+
+ # return the csrf token and runtime id
+ return {
+ "ok": True,
+ "token": session["csrf_token"],
+ "runtime_id": runtime.get_runtime_id(),
+ }
+
+ async def check_allowed_origin(self, request: Request):
+ # if login is required, this check is unnecessary
+ if login.is_login_required():
+ return {"ok": True, "origin": "", "allowed_origins": ""}
+ # initialize allowed origins if not yet set
+ self.initialize_allowed_origins(request)
+ # otherwise, check if the origin is allowed
+ return await self.is_allowed_origin(request)
+
+ async def is_allowed_origin(self, request: Request):
+ # get the origin from the request
+ origin = self.get_origin_from_request(request)
+ if not origin:
+ return {"ok": False, "origin": "", "allowed_origins": ""}
+
+ # list of allowed origins
+ allowed_origins = await self.get_allowed_origins()
+
+ # check if the origin is allowed
+ match = any(
+ fnmatch.fnmatch(origin, allowed_origin)
+ for allowed_origin in allowed_origins
+ )
+ return {"ok": match, "origin": origin, "allowed_origins": allowed_origins}
+
+
+ def get_origin_from_request(self, request: Request):
+ # get from origin
+ r = request.headers.get("Origin") or request.environ.get("HTTP_ORIGIN")
+ if not r:
+ # try referer if origin not present
+ r = (
+ request.headers.get("Referer")
+ or request.referrer
+ or request.environ.get("HTTP_REFERER")
+ )
+ if not r:
+ return None
+ # parse and normalize
+ p = urlparse(r)
+ if not p.scheme or not p.hostname:
+ return None
+ return f"{p.scheme}://{p.hostname}" + (f":{p.port}" if p.port else "")
+
+ async def get_allowed_origins(self) -> list[str]:
+ # get the allowed origins from the environment
+ allowed_origins = [
+ origin.strip()
+ for origin in (dotenv.get_dotenv_value(ALLOWED_ORIGINS_KEY) or "").split(",")
+ if origin.strip()
+ ]
+
+ # if there are no allowed origins, allow default localhosts
+ if not allowed_origins:
+ allowed_origins = self.get_default_allowed_origins()
+
+ # always allow tunnel url if running
+ try:
+ from python.api.tunnel_proxy import process as tunnel_api_process
+
+ tunnel = await tunnel_api_process({"action": "get"})
+ if tunnel and isinstance(tunnel, dict) and tunnel["success"]:
+ allowed_origins.append(tunnel["tunnel_url"])
+ except Exception:
+ pass
+
+ return allowed_origins
+
+ def get_default_allowed_origins(self) -> list[str]:
+ return ["*://localhost:*", "*://127.0.0.1:*", "*://0.0.0.0:*"]
+
+ def initialize_allowed_origins(self, request: Request):
+ """
+ If A0 is hosted on a server, add the first visit origin to ALLOWED_ORIGINS.
+ This simplifies deployment process as users can access their new instance without
+ additional setup while keeping it secure.
+ """
+ # dotenv value is already set, do nothing
+ denv = dotenv.get_dotenv_value(ALLOWED_ORIGINS_KEY)
+ if denv:
+ return
+
+ # get the origin from the request
+ req_origin = self.get_origin_from_request(request)
+ if not req_origin:
+ return
+
+ # check if the origin is allowed by default
+ allowed_origins = self.get_default_allowed_origins()
+ match = any(
+ fnmatch.fnmatch(req_origin, allowed_origin)
+ for allowed_origin in allowed_origins
+ )
+ if match:
+ return
+
+ # if not, add it to the allowed origins
+ allowed_origins.append(req_origin)
+ dotenv.save_dotenv_value(ALLOWED_ORIGINS_KEY, ",".join(allowed_origins))
+
+
\ No newline at end of file
diff --git a/python/api/ctx_window_get.py b/python/api/ctx_window_get.py
index 16a4438b7a..46573cb608 100644
--- a/python/api/ctx_window_get.py
+++ b/python/api/ctx_window_get.py
@@ -6,7 +6,7 @@
class GetCtxWindow(ApiHandler):
async def process(self, input: Input, request: Request) -> Output:
ctxid = input.get("context", [])
- context = self.get_context(ctxid)
+ context = self.use_context(ctxid)
agent = context.streaming_agent or context.agent0
window = agent.get_data(agent.DATA_NAME_CTX_WINDOW)
if not window or not isinstance(window, dict):
diff --git a/python/api/download_work_dir_file.py b/python/api/download_work_dir_file.py
index 454ae5a12f..747c6a4940 100644
--- a/python/api/download_work_dir_file.py
+++ b/python/api/download_work_dir_file.py
@@ -7,6 +7,8 @@
from python.helpers.api import ApiHandler, Input, Output, Request
from python.helpers import files, runtime
from python.api import file_info
+from urllib.parse import quote
+
def stream_file_download(file_source, download_name, chunk_size=8192):
@@ -63,7 +65,7 @@ def generate():
content_type=content_type,
direct_passthrough=True, # Prevent Flask from buffering the response
headers={
- 'Content-Disposition': f'attachment; filename="{download_name}"',
+ 'Content-Disposition': make_disposition(download_name),
'Content-Length': str(file_size), # Critical for browser progress bars
'Cache-Control': 'no-cache',
'X-Accel-Buffering': 'no', # Disable nginx buffering
@@ -74,6 +76,15 @@ def generate():
return response
+def make_disposition(download_name: str) -> str:
+ # Basic ASCII fallback (strip or replace weird chars)
+ ascii_fallback = download_name.encode("ascii", "ignore").decode("ascii") or "download"
+ utf8_name = quote(download_name) # URL-encode UTF-8 bytes
+
+ # RFC 5987: filename* with UTF-8
+ return f'attachment; filename="{ascii_fallback}"; filename*=UTF-8\'\'{utf8_name}'
+
+
class DownloadFile(ApiHandler):
@classmethod
diff --git a/python/api/get_work_dir_files.py b/python/api/get_work_dir_files.py
index 1783e3d8a0..13cd428d4a 100644
--- a/python/api/get_work_dir_files.py
+++ b/python/api/get_work_dir_files.py
@@ -1,6 +1,6 @@
from python.helpers.api import ApiHandler, Request, Response
from python.helpers.file_browser import FileBrowser
-from python.helpers import runtime
+from python.helpers import runtime, files
class GetWorkDirFiles(ApiHandler):
@@ -15,7 +15,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
# current_path = "work_dir"
# else:
# current_path = "root"
- current_path = "root"
+ current_path = "/a0"
# browser = FileBrowser()
# result = browser.get_files(current_path)
diff --git a/python/api/history_get.py b/python/api/history_get.py
index 7ff40e3fe9..608a523ecc 100644
--- a/python/api/history_get.py
+++ b/python/api/history_get.py
@@ -4,7 +4,7 @@
class GetHistory(ApiHandler):
async def process(self, input: dict, request: Request) -> dict | Response:
ctxid = input.get("context", [])
- context = self.get_context(ctxid)
+ context = self.use_context(ctxid)
agent = context.streaming_agent or context.agent0
history = agent.history.output_text()
size = agent.history.get_tokens()
diff --git a/python/api/import_knowledge.py b/python/api/import_knowledge.py
index fd1b26542d..bfc25b6490 100644
--- a/python/api/import_knowledge.py
+++ b/python/api/import_knowledge.py
@@ -13,7 +13,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
if not ctxid:
raise Exception("No context id provided")
- context = self.get_context(ctxid)
+ context = self.use_context(ctxid)
file_list = request.files.getlist("files[]")
KNOWLEDGE_FOLDER = files.get_abs_path(memory.get_custom_knowledge_subdir_abs(context.agent0), "main")
diff --git a/python/api/knowledge_path_get.py b/python/api/knowledge_path_get.py
new file mode 100644
index 0000000000..dadf0a692b
--- /dev/null
+++ b/python/api/knowledge_path_get.py
@@ -0,0 +1,25 @@
+from python.helpers.api import ApiHandler, Request, Response
+from python.helpers import files, memory, notification, projects
+import os
+from werkzeug.utils import secure_filename
+
+
+class GetKnowledgePath(ApiHandler):
+ async def process(self, input: dict, request: Request) -> dict | Response:
+ ctxid = input.get("ctxid", "")
+ if not ctxid:
+ raise Exception("No context id provided")
+ context = self.use_context(ctxid)
+
+ project_name = projects.get_context_project_name(context)
+ if project_name:
+ knowledge_folder = projects.get_project_meta_folder(project_name, "knowledge")
+ else:
+ knowledge_folder = memory.get_custom_knowledge_subdir_abs(context.agent0)
+
+ knowledge_folder = files.normalize_a0_path(knowledge_folder)
+
+ return {
+ "ok": True,
+ "path": knowledge_folder,
+ }
\ No newline at end of file
diff --git a/python/api/knowledge_reindex.py b/python/api/knowledge_reindex.py
new file mode 100644
index 0000000000..b86eafeaf1
--- /dev/null
+++ b/python/api/knowledge_reindex.py
@@ -0,0 +1,21 @@
+from python.helpers.api import ApiHandler, Request, Response
+from python.helpers import files, memory, notification, projects
+import os
+from werkzeug.utils import secure_filename
+
+
+class ReindexKnowledge(ApiHandler):
+ async def process(self, input: dict, request: Request) -> dict | Response:
+ ctxid = input.get("ctxid", "")
+ if not ctxid:
+ raise Exception("No context id provided")
+ context = self.use_context(ctxid)
+
+ # reload memory to re-import knowledge
+ await memory.Memory.reload(context.agent0)
+ context.log.set_initial_progress()
+
+ return {
+ "ok": True,
+ "message": "Knowledge re-indexed",
+ }
diff --git a/python/api/memory_dashboard.py b/python/api/memory_dashboard.py
index a494227c8b..d1275fe417 100644
--- a/python/api/memory_dashboard.py
+++ b/python/api/memory_dashboard.py
@@ -1,8 +1,9 @@
from python.helpers.api import ApiHandler, Request, Response
-from python.helpers.memory import Memory
+from python.helpers.memory import Memory, get_existing_memory_subdirs, get_context_memory_subdir
from python.helpers import files
from models import ModelConfig, ModelType
from langchain_core.documents import Document
+from agent import AgentContext
class MemoryDashboard(ApiHandler):
@@ -113,21 +114,13 @@ async def _get_current_memory_subdir(self, input: dict) -> dict:
# Fallback to default if no context available
return {"success": True, "memory_subdir": "default"}
- # Import AgentContext here to avoid circular imports
- from agent import AgentContext
-
- # Get the context and extract memory subdirectory
- context = AgentContext.get(context_id)
- if (
- context
- and hasattr(context, "config")
- and hasattr(context.config, "memory_subdir")
- ):
- memory_subdir = context.config.memory_subdir or "default"
- return {"success": True, "memory_subdir": memory_subdir}
- else:
+ context = AgentContext.use(context_id)
+ if not context:
return {"success": True, "memory_subdir": "default"}
+ memory_subdir = get_context_memory_subdir(context)
+ return {"success": True, "memory_subdir": memory_subdir or "default"}
+
except Exception:
return {
"success": True, # Still success, just fallback to default
@@ -138,12 +131,7 @@ async def _get_memory_subdirs(self) -> dict:
"""Get available memory subdirectories."""
try:
# Get subdirectories from memory folder
- subdirs = files.get_subdirectories("memory", exclude="embeddings")
-
- # Ensure 'default' is always available
- if "default" not in subdirs:
- subdirs.insert(0, "default")
-
+ subdirs = get_existing_memory_subdirs()
return {"success": True, "subdirs": subdirs}
except Exception as e:
return {
diff --git a/python/api/message.py b/python/api/message.py
index 2f88f4784d..bd378e4f79 100644
--- a/python/api/message.py
+++ b/python/api/message.py
@@ -1,7 +1,7 @@
from agent import AgentContext, UserMessage
from python.helpers.api import ApiHandler, Request, Response
-from python.helpers import files
+from python.helpers import files, extension
import os
from werkzeug.utils import secure_filename
from python.helpers.defer import DeferredTask
@@ -53,7 +53,13 @@ async def communicate(self, input: dict, request: Request):
message = text
# Obtain agent context
- context = self.get_context(ctxid)
+ context = self.use_context(ctxid)
+
+    # call extension point, allow it to modify data
+ data = { "message": message, "attachment_paths": attachment_paths }
+ await extension.call_extensions("user_message_ui", agent=context.get_agent(), data=data)
+ message = data.get("message", "")
+ attachment_paths = data.get("attachment_paths", [])
# Store attachments in agent data
# context.agent0.set_data("attachments", attachment_paths)
diff --git a/python/api/nudge.py b/python/api/nudge.py
index 64683da070..558734cdf0 100644
--- a/python/api/nudge.py
+++ b/python/api/nudge.py
@@ -6,7 +6,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
if not ctxid:
raise Exception("No context id provided")
- context = self.get_context(ctxid)
+ context = self.use_context(ctxid)
context.nudge()
msg = "Process reset, agent nudged."
diff --git a/python/api/pause.py b/python/api/pause.py
index e4b20ecfb7..47b444e802 100644
--- a/python/api/pause.py
+++ b/python/api/pause.py
@@ -8,7 +8,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
ctxid = input.get("context", "")
# context instance - get or create
- context = self.get_context(ctxid)
+ context = self.use_context(ctxid)
context.paused = paused
diff --git a/python/api/poll.py b/python/api/poll.py
index 698321cbd4..dbe7105c66 100644
--- a/python/api/poll.py
+++ b/python/api/poll.py
@@ -18,10 +18,17 @@ async def process(self, input: dict, request: Request) -> dict | Response:
timezone = input.get("timezone", get_dotenv_value("DEFAULT_USER_TIMEZONE", "UTC"))
Localization.get().set_timezone(timezone)
- # context instance - get or create
- context = self.get_context(ctxid)
-
- logs = context.log.output(start=from_no)
+ # context instance - get or create only if ctxid is provided
+ if ctxid:
+ try:
+ context = self.use_context(ctxid, create_if_not_exists=False)
+ except Exception as e:
+ context = None
+ else:
+ context = None
+
+ # Get logs only if we have a context
+ logs = context.log.output(start=from_no) if context else []
# Get notifications from global notification manager
notification_manager = AgentContext.get_notification_manager()
@@ -54,7 +61,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
continue
# Create the base context data that will be returned
- context_data = ctx.serialize()
+ context_data = ctx.output()
context_task = scheduler.get_task_by_uuid(ctx.id)
# Determine if this is a task-dedicated context by checking if a task with this UUID exists
@@ -102,15 +109,16 @@ async def process(self, input: dict, request: Request) -> dict | Response:
# data from this server
return {
- "context": context.id,
+ "deselect_chat": ctxid and not context,
+ "context": context.id if context else "",
"contexts": ctxs,
"tasks": tasks,
"logs": logs,
- "log_guid": context.log.guid,
- "log_version": len(context.log.updates),
- "log_progress": context.log.progress,
- "log_progress_active": context.log.progress_active,
- "paused": context.paused,
+ "log_guid": context.log.guid if context else "",
+ "log_version": len(context.log.updates) if context else 0,
+ "log_progress": context.log.progress if context else 0,
+ "log_progress_active": context.log.progress_active if context else False,
+ "paused": context.paused if context else False,
"notifications": notifications,
"notifications_guid": notification_manager.guid,
"notifications_version": len(notification_manager.updates),
diff --git a/python/api/projects.py b/python/api/projects.py
new file mode 100644
index 0000000000..3e06fadcdb
--- /dev/null
+++ b/python/api/projects.py
@@ -0,0 +1,91 @@
+from python.helpers.api import ApiHandler, Input, Output, Request, Response
+from python.helpers import projects
+
+
+class Projects(ApiHandler):
+ async def process(self, input: Input, request: Request) -> Output:
+ action = input.get("action", "")
+ ctxid = input.get("context_id", None)
+
+ if ctxid:
+ _context = self.use_context(ctxid)
+
+ try:
+ if action == "list":
+ data = self.get_active_projects_list()
+ elif action == "load":
+ data = self.load_project(input.get("name", None))
+ elif action == "create":
+ data = self.create_project(input.get("project", None))
+ elif action == "update":
+ data = self.update_project(input.get("project", None))
+ elif action == "delete":
+ data = self.delete_project(input.get("name", None))
+ elif action == "activate":
+ data = self.activate_project(ctxid, input.get("name", None))
+ elif action == "deactivate":
+ data = self.deactivate_project(ctxid)
+ elif action == "file_structure":
+ data = self.get_file_structure(input.get("name", None), input.get("settings"))
+ else:
+ raise Exception("Invalid action")
+
+ return {
+ "ok": True,
+ "data": data,
+ }
+ except Exception as e:
+ return {
+ "ok": False,
+ "error": str(e),
+ }
+
+ def get_active_projects_list(self):
+ return projects.get_active_projects_list()
+
+ def create_project(self, project: dict|None):
+ if project is None:
+ raise Exception("Project data is required")
+ data = projects.BasicProjectData(**project)
+ name = projects.create_project(project["name"], data)
+ return projects.load_edit_project_data(name)
+
+ def load_project(self, name: str|None):
+ if name is None:
+ raise Exception("Project name is required")
+ return projects.load_edit_project_data(name)
+
+ def update_project(self, project: dict|None):
+ if project is None:
+ raise Exception("Project data is required")
+ data = projects.EditProjectData(**project)
+ name = projects.update_project(project["name"], data)
+ return projects.load_edit_project_data(name)
+
+ def delete_project(self, name: str|None):
+ if name is None:
+ raise Exception("Project name is required")
+ return projects.delete_project(name)
+
+ def activate_project(self, context_id: str|None, name: str|None):
+ if not context_id:
+ raise Exception("Context ID is required")
+ if not name:
+ raise Exception("Project name is required")
+ return projects.activate_project(context_id, name)
+
+ def deactivate_project(self, context_id: str|None):
+ if not context_id:
+ raise Exception("Context ID is required")
+ return projects.deactivate_project(context_id)
+
+ def get_file_structure(self, name: str|None, settings: dict|None):
+ if not name:
+ raise Exception("Project name is required")
+ # project data
+ basic_data = projects.load_basic_project_data(name)
+ # override file structure settings
+ if settings:
+ basic_data["file_structure"] = settings # type: ignore
+ # get structure
+ return projects.get_file_structure(name, basic_data)
\ No newline at end of file
diff --git a/python/api/scheduler_task_create.py b/python/api/scheduler_task_create.py
index c091b3b198..48aeb24e89 100644
--- a/python/api/scheduler_task_create.py
+++ b/python/api/scheduler_task_create.py
@@ -3,6 +3,7 @@
TaskScheduler, ScheduledTask, AdHocTask, PlannedTask, TaskSchedule,
serialize_task, parse_task_schedule, parse_task_plan, TaskType
)
+from python.helpers.projects import load_basic_project_data
from python.helpers.localization import Localization
from python.helpers.print_style import PrintStyle
import random
@@ -27,7 +28,26 @@ async def process(self, input: Input, request: Request) -> Output:
system_prompt = input.get("system_prompt", "")
prompt = input.get("prompt")
attachments = input.get("attachments", [])
- context_id = input.get("context_id", None)
+
+ requested_project_slug = input.get("project_name")
+ if isinstance(requested_project_slug, str):
+ requested_project_slug = requested_project_slug.strip() or None
+ else:
+ requested_project_slug = None
+
+ project_slug = requested_project_slug
+ project_color = None
+
+ if project_slug:
+ try:
+ metadata = load_basic_project_data(requested_project_slug)
+ project_color = metadata.get("color") or None
+ except Exception as exc:
+ printer.error(f"SchedulerTaskCreate: failed to load project '{project_slug}': {exc}")
+ return {"error": f"Saving project failed: {project_slug}"}
+
+ # Always dedicated context for scheduler tasks created by ui
+ task_context_id = None
# Check if schedule is provided (for ScheduledTask)
schedule = input.get("schedule", {})
@@ -77,8 +97,10 @@ async def process(self, input: Input, request: Request) -> Output:
prompt=prompt,
schedule=task_schedule,
attachments=attachments,
- context_id=context_id,
- timezone=timezone
+ context_id=task_context_id,
+ timezone=timezone,
+ project_name=project_slug,
+ project_color=project_color,
)
elif plan:
# Create a planned task
@@ -94,7 +116,9 @@ async def process(self, input: Input, request: Request) -> Output:
prompt=prompt,
plan=task_plan,
attachments=attachments,
- context_id=context_id
+ context_id=task_context_id,
+ project_name=project_slug,
+ project_color=project_color,
)
else:
# Create an ad-hoc task
@@ -105,7 +129,9 @@ async def process(self, input: Input, request: Request) -> Output:
prompt=prompt,
token=token,
attachments=attachments,
- context_id=context_id
+ context_id=task_context_id,
+ project_name=project_slug,
+ project_color=project_color,
)
# Verify token after creation
if isinstance(task, AdHocTask):
@@ -132,5 +158,6 @@ async def process(self, input: Input, request: Request) -> Output:
printer.print(f"Serialized adhoc task, token in response: '{task_dict.get('token')}'")
return {
+ "ok": True,
"task": task_dict
}
diff --git a/python/api/scheduler_task_delete.py b/python/api/scheduler_task_delete.py
index 59e7187992..5e41a0bd61 100644
--- a/python/api/scheduler_task_delete.py
+++ b/python/api/scheduler_task_delete.py
@@ -30,10 +30,11 @@ async def process(self, input: Input, request: Request) -> Output:
context = None
if task.context_id:
- context = self.get_context(task.context_id)
+ context = self.use_context(task.context_id)
# If the task is running, update its state to IDLE first
if task.state == TaskState.RUNNING:
+ scheduler.cancel_running_task(task_id, terminate_thread=True)
if context:
context.reset()
# Update the state to IDLE so any ongoing processes know to terminate
diff --git a/python/api/scheduler_task_update.py b/python/api/scheduler_task_update.py
index 433738652e..b5b73cb59a 100644
--- a/python/api/scheduler_task_update.py
+++ b/python/api/scheduler_task_update.py
@@ -48,6 +48,9 @@ async def process(self, input: Input, request: Request) -> Output:
if "attachments" in input:
update_params["attachments"] = input.get("attachments", [])
+ if "project_name" in input or "project_color" in input:
+ return {"error": "Project changes are not allowed"}
+
# Update schedule if this is a scheduled task and schedule is provided
if isinstance(task, ScheduledTask) and "schedule" in input:
schedule_data = input.get("schedule", {})
@@ -85,5 +88,6 @@ async def process(self, input: Input, request: Request) -> Output:
task_dict = serialize_task(updated_task)
return {
+ "ok": True,
"task": task_dict
}
diff --git a/python/api/scheduler_tasks_list.py b/python/api/scheduler_tasks_list.py
index 30a8c3f068..8d07235d23 100644
--- a/python/api/scheduler_tasks_list.py
+++ b/python/api/scheduler_tasks_list.py
@@ -22,8 +22,8 @@ async def process(self, input: Input, request: Request) -> Output:
# Use the scheduler's convenience method for task serialization
tasks_list = scheduler.serialize_all_tasks()
- return {"tasks": tasks_list}
+ return {"ok": True, "tasks": tasks_list}
except Exception as e:
PrintStyle.error(f"Failed to list tasks: {str(e)} {traceback.format_exc()}")
- return {"error": f"Failed to list tasks: {str(e)} {traceback.format_exc()}", "tasks": []}
+ return {"ok": False, "error": f"Failed to list tasks: {str(e)} {traceback.format_exc()}", "tasks": []}
diff --git a/python/api/settings_get.py b/python/api/settings_get.py
index 5b5bf95c7e..5285b4fd4b 100644
--- a/python/api/settings_get.py
+++ b/python/api/settings_get.py
@@ -4,8 +4,9 @@
class GetSettings(ApiHandler):
async def process(self, input: dict, request: Request) -> dict | Response:
- set = settings.convert_out(settings.get_settings())
- return {"settings": set}
+ backend = settings.get_settings()
+ out = settings.convert_out(backend)
+ return dict(out)
@classmethod
def get_methods(cls) -> list[str]:
diff --git a/python/api/settings_refresh_models.py b/python/api/settings_refresh_models.py
new file mode 100644
index 0000000000..dbccb5c229
--- /dev/null
+++ b/python/api/settings_refresh_models.py
@@ -0,0 +1,73 @@
+from typing import Any
+
+import models as models_module
+from python.helpers.api import ApiHandler, Request, Response
+from python.helpers.model_discovery import (
+ get_models_for_provider,
+ clear_cache,
+)
+
+# Placeholder used for masked API keys in UI
+API_KEY_PLACEHOLDER = "************"
+
+
+class RefreshModels(ApiHandler):
+ """
+ API endpoint to dynamically fetch model options from provider APIs.
+
+ Called when:
+ - User changes the provider dropdown
+ - User enters/updates an API key
+ - User explicitly requests a refresh
+
+ Input:
+ model_type: "chat" or "embedding"
+ provider: Provider ID (e.g., "openai", "anthropic", "openrouter")
+ api_keys: Dictionary of API keys (may contain placeholders)
+ api_base: Optional custom API base URL for OpenAI-compatible providers
+ force_refresh: Optional, if True bypasses cache
+ clear_cache: Optional, if True clears all cache first
+
+ Returns:
+ models: List of {value, label} options fetched from the provider's API
+ """
+
+ async def process(
+ self, input: dict[Any, Any], request: Request
+ ) -> dict[Any, Any] | Response:
+ model_type = input.get("model_type", "chat")
+ provider = input.get("provider", "")
+ api_keys_input = input.get("api_keys", {})
+ api_base = input.get("api_base", "")
+ force_refresh = input.get("force_refresh", False)
+ should_clear_cache = input.get("clear_cache", False)
+
+ # Handle cache clear request
+ if should_clear_cache:
+ clear_cache()
+
+ if not provider:
+ return {"models": [{"value": "__custom__", "label": "Custom (enter manually)"}]}
+
+ # Resolve actual API keys from environment when placeholders are passed
+ api_keys = {}
+ for prov, key in api_keys_input.items():
+ if key == API_KEY_PLACEHOLDER or not key:
+ # Get actual key from environment
+ actual_key = models_module.get_api_key(prov)
+ if actual_key and actual_key != "None":
+ api_keys[prov] = actual_key
+ else:
+ # Use the provided key (user may have just entered a new one)
+ api_keys[prov] = key
+
+ # Fetch models dynamically from provider API
+ models = await get_models_for_provider(
+ model_type=model_type,
+ provider=provider,
+ api_keys=api_keys,
+ api_base=api_base if api_base else None,
+ force_refresh=force_refresh,
+ )
+
+ return {"models": models}
diff --git a/python/api/settings_set.py b/python/api/settings_set.py
index c24a3cc66d..3213eada74 100644
--- a/python/api/settings_set.py
+++ b/python/api/settings_set.py
@@ -7,6 +7,8 @@
class SetSettings(ApiHandler):
async def process(self, input: dict[Any, Any], request: Request) -> dict[Any, Any] | Response:
- set = settings.convert_in(input)
- set = settings.set_settings(set)
- return {"settings": set}
+ frontend = input.get("settings", input)
+ backend = settings.convert_in(settings.Settings(**frontend))
+ backend = settings.set_settings(backend)
+ out = settings.convert_out(backend)
+ return dict(out)
diff --git a/python/api/subagents.py b/python/api/subagents.py
new file mode 100644
index 0000000000..6f501ac76b
--- /dev/null
+++ b/python/api/subagents.py
@@ -0,0 +1,58 @@
+from python.helpers.api import ApiHandler, Input, Output, Request, Response
+from python.helpers import subagents
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from python.helpers import projects
+
+class Subagents(ApiHandler):
+ async def process(self, input: Input, request: Request) -> Output:
+ action = input.get("action", "")
+ ctxid = input.get("context_id", None)
+
+ if ctxid:
+ _context = self.use_context(ctxid)
+
+ try:
+ if action == "list":
+ data = self.get_subagents_list()
+ elif action == "load":
+ data = self.load_agent(input.get("name", None))
+ elif action == "save":
+ data = self.save_agent(input.get("name", None), input.get("data", None))
+ elif action == "delete":
+ data = self.delete_agent(input.get("name", None))
+ else:
+ raise Exception("Invalid action")
+
+ return {
+ "ok": True,
+ "data": data,
+ }
+ except Exception as e:
+ return {
+ "ok": False,
+ "error": str(e),
+ }
+
+ def get_subagents_list(self):
+ return subagents.get_agents_list()
+
+ def load_agent(self, name: str|None):
+ if name is None:
+ raise Exception("Subagent name is required")
+ return subagents.load_agent_data(name)
+
+ def save_agent(self, name:str|None, data: dict|None):
+ if name is None:
+ raise Exception("Subagent name is required")
+ if data is None:
+ raise Exception("Subagent data is required")
+ subagent = subagents.SubAgent(**data)
+ subagents.save_agent_data(name, subagent)
+ return subagents.load_agent_data(name)
+
+ def delete_agent(self, name: str|None):
+ if name is None:
+ raise Exception("Subagent name is required")
+ subagents.delete_agent_data(name)
\ No newline at end of file
diff --git a/python/api/synthesize.py b/python/api/synthesize.py
index 071af8760f..87f93eeaa2 100644
--- a/python/api/synthesize.py
+++ b/python/api/synthesize.py
@@ -7,9 +7,11 @@
class Synthesize(ApiHandler):
async def process(self, input: dict, request: Request) -> dict | Response:
text = input.get("text", "")
- # ctxid = input.get("ctxid", "")
+ ctxid = input.get("ctxid", "")
- # context = self.get_context(ctxid)
+ if ctxid:
+ context = self.use_context(ctxid)
+
# if not await kokoro_tts.is_downloaded():
# context.log.log(type="info", content="Kokoro TTS model is currently being initialized, please wait...")
diff --git a/python/api/transcribe.py b/python/api/transcribe.py
index 8276a70a66..5abb9a0834 100644
--- a/python/api/transcribe.py
+++ b/python/api/transcribe.py
@@ -5,9 +5,11 @@
class Transcribe(ApiHandler):
async def process(self, input: dict, request: Request) -> dict | Response:
audio = input.get("audio")
- # ctxid = input.get("ctxid", "")
+ ctxid = input.get("ctxid", "")
+
+ if ctxid:
+ context = self.use_context(ctxid)
- # context = self.get_context(ctxid)
# if not await whisper.is_downloaded():
# context.log.log(type="info", content="Whisper STT model is currently being initialized, please wait...")
diff --git a/python/api/tunnel.py b/python/api/tunnel.py
index ca5d64f42d..edcf8ab55a 100644
--- a/python/api/tunnel.py
+++ b/python/api/tunnel.py
@@ -4,48 +4,62 @@
class Tunnel(ApiHandler):
async def process(self, input: dict, request: Request) -> dict | Response:
- action = input.get("action", "get")
-
- tunnel_manager = TunnelManager.get_instance()
+ return await process(input)
- if action == "health":
- return {"success": True}
-
- if action == "create":
- port = runtime.get_web_ui_port()
- provider = input.get("provider", "serveo") # Default to serveo
- tunnel_url = tunnel_manager.start_tunnel(port, provider)
- if tunnel_url is None:
- # Add a little delay and check again - tunnel might be starting
- import time
- time.sleep(2)
- tunnel_url = tunnel_manager.get_tunnel_url()
-
- return {
- "success": tunnel_url is not None,
- "tunnel_url": tunnel_url,
- "message": "Tunnel creation in progress" if tunnel_url is None else "Tunnel created successfully"
- }
-
- elif action == "stop":
- return self.stop()
-
- elif action == "get":
- tunnel_url = tunnel_manager.get_tunnel_url()
+async def process(input: dict) -> dict | Response:
+ action = input.get("action", "get")
+
+ tunnel_manager = TunnelManager.get_instance()
+
+ if action == "health":
+ return {"success": True}
+
+ if action == "create":
+ port = runtime.get_web_ui_port()
+ provider = input.get("provider", "serveo") # Default to serveo
+ tunnel_url = tunnel_manager.start_tunnel(port, provider)
+ error = tunnel_manager.get_last_error()
+ if error:
return {
- "success": tunnel_url is not None,
- "tunnel_url": tunnel_url,
- "is_running": tunnel_manager.is_running
+ "success": False,
+ "tunnel_url": None,
+ "message": error,
+ "notifications": tunnel_manager.get_notifications()
}
return {
- "success": False,
- "error": "Invalid action. Use 'create', 'stop', or 'get'."
- }
-
- def stop(self):
- tunnel_manager = TunnelManager.get_instance()
- tunnel_manager.stop_tunnel()
+ "success": tunnel_url is not None,
+ "tunnel_url": tunnel_url,
+ "notifications": tunnel_manager.get_notifications()
+ }
+
+ elif action == "stop":
+ return stop()
+
+ elif action == "get":
+ tunnel_url = tunnel_manager.get_tunnel_url()
return {
- "success": True
+ "success": tunnel_url is not None,
+ "tunnel_url": tunnel_url,
+ "is_running": tunnel_manager.is_running
}
+
+ elif action == "notifications":
+ return {
+ "success": True,
+ "notifications": tunnel_manager.get_notifications(),
+ "tunnel_url": tunnel_manager.get_tunnel_url(),
+ "is_running": tunnel_manager.is_running
+ }
+
+ return {
+ "success": False,
+ "error": "Invalid action. Use 'create', 'stop', 'get', or 'notifications'."
+ }
+
+def stop():
+ tunnel_manager = TunnelManager.get_instance()
+ tunnel_manager.stop_tunnel()
+ return {
+ "success": True
+ }
diff --git a/python/api/tunnel_proxy.py b/python/api/tunnel_proxy.py
index c8b1bd75b1..4df17893a6 100644
--- a/python/api/tunnel_proxy.py
+++ b/python/api/tunnel_proxy.py
@@ -6,30 +6,33 @@
class TunnelProxy(ApiHandler):
async def process(self, input: dict, request: Request) -> dict | Response:
- # Get configuration from environment
- tunnel_api_port = (
- runtime.get_arg("tunnel_api_port")
- or int(dotenv.get_dotenv_value("TUNNEL_API_PORT", 0))
- or 55520
- )
+ return await process(input)
- # first verify the service is running:
+async def process(input: dict) -> dict | Response:
+ # Get configuration from environment
+ tunnel_api_port = (
+ runtime.get_arg("tunnel_api_port")
+ or int(dotenv.get_dotenv_value("TUNNEL_API_PORT", 0))
+ or 55520
+ )
+
+ # first verify the service is running:
+ service_ok = False
+ try:
+ response = requests.post(f"http://localhost:{tunnel_api_port}/", json={"action": "health"})
+ if response.status_code == 200:
+ service_ok = True
+ except Exception as e:
service_ok = False
+
+ # forward this request to the tunnel service if OK
+ if service_ok:
try:
- response = requests.post(f"http://localhost:{tunnel_api_port}/", json={"action": "health"})
- if response.status_code == 200:
- service_ok = True
+ response = requests.post(f"http://localhost:{tunnel_api_port}/", json=input)
+ return response.json()
except Exception as e:
- service_ok = False
-
- # forward this request to the tunnel service if OK
- if service_ok:
- try:
- response = requests.post(f"http://localhost:{tunnel_api_port}/", json=input)
- return response.json()
- except Exception as e:
- return {"error": str(e)}
- else:
- # forward to API handler directly
- from python.api.tunnel import Tunnel
- return await Tunnel(self.app, self.thread_lock).process(input, request)
+ return {"error": str(e)}
+ else:
+ # forward to API handler directly
+ from python.api.tunnel import process as local_process
+ return await local_process(input)
diff --git a/python/extensions/agent_init/_10_initial_message.py b/python/extensions/agent_init/_10_initial_message.py
index f64a3fce44..65b5010fdb 100644
--- a/python/extensions/agent_init/_10_initial_message.py
+++ b/python/extensions/agent_init/_10_initial_message.py
@@ -35,7 +35,6 @@ async def execute(self, **kwargs):
# Add to log (green bubble) for immediate UI display
self.agent.context.log.log(
type="response",
- heading=f"{self.agent.agent_name}: Welcome",
content=initial_message_text,
finished=True,
update_progress="none",
diff --git a/python/extensions/agent_init/_15_load_profile_settings.py b/python/extensions/agent_init/_15_load_profile_settings.py
new file mode 100644
index 0000000000..d4c9b5ab42
--- /dev/null
+++ b/python/extensions/agent_init/_15_load_profile_settings.py
@@ -0,0 +1,53 @@
+from initialize import initialize_agent
+from python.helpers import dirty_json, files, subagents, projects
+from python.helpers.extension import Extension
+
+
+class LoadProfileSettings(Extension):
+
+ async def execute(self, **kwargs) -> None:
+
+ if not self.agent or not self.agent.config.profile:
+ return
+
+ config_files = subagents.get_paths(self.agent, "settings.json", include_default=False)
+
+ settings_override = {}
+ for settings_path in config_files:
+ if files.exists(settings_path):
+ try:
+ override_settings_str = files.read_file(settings_path)
+ override_settings = dirty_json.try_parse(override_settings_str)
+ if isinstance(override_settings, dict):
+ settings_override.update(override_settings)
+ else:
+ raise Exception(
+ f"Subordinate settings in {settings_path} must be a JSON object."
+ )
+ except Exception as e:
+ self.agent.context.log.log(
+ type="error",
+ content=(
+ f"Error loading subordinate settings from {settings_path} for "
+ f"profile '{self.agent.config.profile}': {e}"
+ ),
+ )
+
+ if settings_override:
+ # Preserve the original memory_subdir unless it's explicitly overridden
+ current_memory_subdir = self.agent.config.memory_subdir
+ new_config = initialize_agent(override_settings=settings_override)
+ if (
+ "agent_memory_subdir" not in settings_override
+ and current_memory_subdir != "default"
+ ):
+ new_config.memory_subdir = current_memory_subdir
+ self.agent.config = new_config
+ # self.agent.context.log.log(
+ # type="info",
+ # content=(
+ # "Loaded custom settings for agent "
+ # f"{self.agent.number} with profile '{self.agent.config.profile}'."
+ # ),
+ # )
+
diff --git a/python/extensions/banners/_10_unsecured_connection.py b/python/extensions/banners/_10_unsecured_connection.py
new file mode 100644
index 0000000000..89f5dcb6c5
--- /dev/null
+++ b/python/extensions/banners/_10_unsecured_connection.py
@@ -0,0 +1,63 @@
+from python.helpers.extension import Extension
+from python.helpers import dotenv
+import re
+
+
+class UnsecuredConnectionCheck(Extension):
+ """Check: non-local without credentials, or credentials over non-HTTPS."""
+
+ async def execute(self, banners: list = [], frontend_context: dict = {}, **kwargs):
+ hostname = frontend_context.get("hostname", "")
+ protocol = frontend_context.get("protocol", "")
+
+ auth_login = dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN, "")
+ auth_password = dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD, "")
+ has_credentials = bool(auth_login and auth_login.strip() and auth_password and auth_password.strip())
+
+ is_local = self._is_localhost(hostname)
+ is_https = protocol == "https:"
+
+ if not is_local and not has_credentials:
+ banners.append({
+ "id": "unsecured-connection",
+ "type": "warning",
+ "priority": 80,
+ "title": "Unsecured Connection",
+ "html": """You are accessing Agent Zero from a non-local address without authentication.
+
+ Configure credentials in Settings → External Services → Authentication.""",
+ "dismissible": True,
+ "source": "backend"
+ })
+
+ if has_credentials and not is_local and not is_https:
+ banners.append({
+ "id": "credentials-unencrypted",
+ "type": "warning",
+ "priority": 90,
+ "title": "Credentials May Be Sent Unencrypted",
+ "html": """Your connection is not using HTTPS. Login credentials may be transmitted in plain text.
+ Consider using HTTPS or a secure tunnel.""",
+ "dismissible": True,
+ "source": "backend"
+ })
+
+ def _is_localhost(self, hostname: str) -> bool:
+ local_patterns = ["localhost", "127.0.0.1", "::1", "0.0.0.0"]
+
+ if hostname in local_patterns:
+ return True
+
+ # RFC1918 private ranges
+ if re.match(r"^192\.168\.\d{1,3}\.\d{1,3}$", hostname):
+ return True
+ if re.match(r"^10\.\d{1,3}\.\d{1,3}\.\d{1,3}$", hostname):
+ return True
+ if re.match(r"^172\.(1[6-9]|2\d|3[01])\.\d{1,3}\.\d{1,3}$", hostname):
+ return True
+
+ # .local domains
+ if hostname.endswith(".local"):
+ return True
+
+ return False
diff --git a/python/extensions/banners/_20_missing_api_key.py b/python/extensions/banners/_20_missing_api_key.py
new file mode 100644
index 0000000000..9d31a5b509
--- /dev/null
+++ b/python/extensions/banners/_20_missing_api_key.py
@@ -0,0 +1,64 @@
+from python.helpers.extension import Extension
+from python.helpers import settings as settings_helper
+import models
+
+
+class MissingApiKeyCheck(Extension):
+ """Check if API keys are configured for selected model providers."""
+
+ LOCAL_PROVIDERS = ["ollama", "lm_studio"]
+ LOCAL_EMBEDDING = ["huggingface"]
+ MODEL_TYPE_NAMES = {
+ "chat": "Chat Model",
+ "utility": "Utility Model",
+ "browser": "Web Browser Model",
+ "embedding": "Embedding Model",
+ }
+
+ async def execute(self, banners: list = [], frontend_context: dict = {}, **kwargs):
+ current_settings = settings_helper.get_settings()
+ model_providers = {
+ "chat": current_settings.get("chat_model_provider", ""),
+ "utility": current_settings.get("util_model_provider", ""),
+ "browser": current_settings.get("browser_model_provider", ""),
+ "embedding": current_settings.get("embed_model_provider", ""),
+ }
+
+ missing_providers = []
+
+ for model_type, provider in model_providers.items():
+ if not provider:
+ continue
+
+ provider_lower = provider.lower()
+ if provider_lower in self.LOCAL_PROVIDERS:
+ continue
+ if model_type == "embedding" and provider_lower in self.LOCAL_EMBEDDING:
+ continue
+
+ api_key = models.get_api_key(provider_lower)
+ if not (api_key and api_key.strip() and api_key != "None"):
+ missing_providers.append({
+ "model_type": self.MODEL_TYPE_NAMES.get(model_type, model_type),
+ "provider": provider,
+ })
+
+ if not missing_providers:
+ return
+
+ model_list = ", ".join(
+ f"{p['model_type']} ({p['provider']})" for p in missing_providers
+ )
+
+ banners.append({
+ "id": "missing-api-key",
+ "type": "error",
+ "priority": 100,
+ "title": "Missing API Key",
+ "html": f"""No API key configured for: {model_list}.
+ Agent Zero will not be able to function properly.
+
+ Add your API key in Settings → External Services → API Keys.""",
+ "dismissible": False,
+ "source": "backend"
+ })
diff --git a/python/extensions/before_main_llm_call/_10_log_for_stream.py b/python/extensions/before_main_llm_call/_10_log_for_stream.py
index 49b04b7b7b..6618a0a47f 100644
--- a/python/extensions/before_main_llm_call/_10_log_for_stream.py
+++ b/python/extensions/before_main_llm_call/_10_log_for_stream.py
@@ -19,8 +19,10 @@ async def execute(self, loop_data: LoopData = LoopData(), text: str = "", **kwar
)
)
-def build_heading(agent, text: str):
- return f"icon://network_intelligence {agent.agent_name}: {text}"
+def build_heading(agent, text: str, icon: str = "network_intelligence"):
+ # Include agent identifier for all agents (A0:, A1:, A2:, etc.)
+ agent_prefix = f"{agent.agent_name}: "
+ return f"icon://{icon} {agent_prefix}{text}"
def build_default_heading(agent):
return build_heading(agent, "Generating...")
\ No newline at end of file
diff --git a/python/extensions/error_format/_10_mask_errors.py b/python/extensions/error_format/_10_mask_errors.py
index e685fdbbc5..f90cf77730 100644
--- a/python/extensions/error_format/_10_mask_errors.py
+++ b/python/extensions/error_format/_10_mask_errors.py
@@ -1,5 +1,5 @@
from python.helpers.extension import Extension
-from python.helpers.secrets import SecretsManager
+from python.helpers.secrets import get_secrets_manager
class MaskErrorSecrets(Extension):
@@ -10,7 +10,7 @@ async def execute(self, **kwargs):
if not msg:
return
- secrets_mgr = SecretsManager.get_instance()
+ secrets_mgr = get_secrets_manager(self.agent.context)
# Mask the error message
if "message" in msg:
diff --git a/python/extensions/hist_add_before/_10_mask_content.py b/python/extensions/hist_add_before/_10_mask_content.py
index 279c81b146..a59006e62f 100644
--- a/python/extensions/hist_add_before/_10_mask_content.py
+++ b/python/extensions/hist_add_before/_10_mask_content.py
@@ -1,4 +1,5 @@
from python.helpers.extension import Extension
+from python.helpers.secrets import get_secrets_manager
class MaskHistoryContent(Extension):
@@ -10,8 +11,7 @@ async def execute(self, **kwargs):
return
try:
- from python.helpers.secrets import SecretsManager
- secrets_mgr = SecretsManager.get_instance()
+ secrets_mgr = get_secrets_manager(self.agent.context)
# Mask the content before adding to history
content_data["content"] = self._mask_content(content_data["content"], secrets_mgr)
diff --git a/python/extensions/message_loop_prompts_after/_75_include_project_extras.py b/python/extensions/message_loop_prompts_after/_75_include_project_extras.py
new file mode 100644
index 0000000000..87bef95be6
--- /dev/null
+++ b/python/extensions/message_loop_prompts_after/_75_include_project_extras.py
@@ -0,0 +1,47 @@
+from python.helpers.extension import Extension
+from agent import LoopData
+from python.helpers import projects
+
+
+class IncludeProjectExtras(Extension):
+ async def execute(self, loop_data: LoopData = LoopData(), **kwargs):
+
+ # active project
+ project_name = projects.get_context_project_name(self.agent.context)
+ if not project_name:
+ return
+
+ # project config
+ project = projects.load_basic_project_data(project_name)
+
+ # load file structure if enabled
+ if project["file_structure"]["enabled"]:
+ file_structure = projects.get_file_structure(project_name)
+ gitignore = cleanup_gitignore(project["file_structure"]["gitignore"])
+
+ # read prompt
+ file_structure_prompt = self.agent.read_prompt(
+ "agent.extras.project.file_structure.md",
+ max_depth=project["file_structure"]["max_depth"],
+ gitignore=gitignore,
+ project_name=project_name,
+ file_structure=file_structure,
+ )
+ # add file structure to the prompt
+ loop_data.extras_temporary["project_file_structure"] = file_structure_prompt
+
+
+def cleanup_gitignore(gitignore_raw: str) -> str:
+ """Process gitignore: split lines, strip, remove comments, remove empty lines."""
+ gitignore_lines = []
+ for line in gitignore_raw.split('\n'):
+ # Strip whitespace
+ line = line.strip()
+ # Remove inline comments (everything after #)
+ if '#' in line:
+ line = line.split('#')[0].strip()
+ # Keep only non-empty lines
+ if line:
+ gitignore_lines.append(line)
+
+ return '\n'.join(gitignore_lines) if gitignore_lines else "nothing ignored"
diff --git a/python/extensions/reasoning_stream_chunk/_10_mask_stream.py b/python/extensions/reasoning_stream_chunk/_10_mask_stream.py
index aef15624a5..07459e64cd 100644
--- a/python/extensions/reasoning_stream_chunk/_10_mask_stream.py
+++ b/python/extensions/reasoning_stream_chunk/_10_mask_stream.py
@@ -1,4 +1,5 @@
from python.helpers.extension import Extension
+from python.helpers.secrets import get_secrets_manager
class MaskReasoningStreamChunk(Extension):
@@ -10,8 +11,7 @@ async def execute(self, **kwargs):
return
try:
- from python.helpers.secrets import SecretsManager
- secrets_mgr = SecretsManager.get_instance()
+ secrets_mgr = get_secrets_manager(self.agent.context)
# Initialize filter if not exists
filter_key = "_reason_stream_filter"
diff --git a/python/extensions/response_stream/_10_log_from_stream.py b/python/extensions/response_stream/_10_log_from_stream.py
index ace6baf547..375b96c032 100644
--- a/python/extensions/response_stream/_10_log_from_stream.py
+++ b/python/extensions/response_stream/_10_log_from_stream.py
@@ -22,12 +22,9 @@ async def execute(
if "headline" in parsed:
heading = build_heading(self.agent, parsed['headline'])
elif "tool_name" in parsed:
- heading = build_heading(self.agent, f"Using tool {parsed['tool_name']}") # if the llm skipped headline
+ heading = build_heading(self.agent, f"Using {parsed['tool_name']}") # if the llm skipped headline
elif "thoughts" in parsed:
- # thought length indicator
- thoughts = "\n".join(parsed["thoughts"])
- pipes = "|" * math.ceil(math.sqrt(len(thoughts)))
- heading = build_heading(self.agent, f"Thinking... {pipes}")
+ heading = build_default_heading(self.agent)
# create log message and store it in loop data temporary params
if "log_item_generating" not in loop_data.params_temporary:
diff --git a/python/extensions/response_stream_chunk/_10_mask_stream.py b/python/extensions/response_stream_chunk/_10_mask_stream.py
index fb52c174a7..fe7eb3dd76 100644
--- a/python/extensions/response_stream_chunk/_10_mask_stream.py
+++ b/python/extensions/response_stream_chunk/_10_mask_stream.py
@@ -1,6 +1,6 @@
from python.helpers.extension import Extension
-from python.helpers.secrets import SecretsManager
from agent import Agent, LoopData
+from python.helpers.secrets import get_secrets_manager
class MaskResponseStreamChunk(Extension):
@@ -13,8 +13,7 @@ async def execute(self, **kwargs):
return
try:
- from python.helpers.secrets import SecretsManager
- secrets_mgr = SecretsManager.get_instance()
+ secrets_mgr = get_secrets_manager(self.agent.context)
# Initialize filter if not exists
filter_key = "_resp_stream_filter"
diff --git a/python/extensions/system_prompt/_10_system_prompt.py b/python/extensions/system_prompt/_10_system_prompt.py
index eb0089779e..9a17c0ad9a 100644
--- a/python/extensions/system_prompt/_10_system_prompt.py
+++ b/python/extensions/system_prompt/_10_system_prompt.py
@@ -3,16 +3,23 @@
from python.helpers.mcp_handler import MCPConfig
from agent import Agent, LoopData
from python.helpers.settings import get_settings
+from python.helpers import projects
class SystemPrompt(Extension):
- async def execute(self, system_prompt: list[str] = [], loop_data: LoopData = LoopData(), **kwargs: Any):
+ async def execute(
+ self,
+ system_prompt: list[str] = [],
+ loop_data: LoopData = LoopData(),
+ **kwargs: Any
+ ):
# append main system prompt and tools
main = get_main_prompt(self.agent)
tools = get_tools_prompt(self.agent)
mcp_tools = get_mcp_tools_prompt(self.agent)
secrets_prompt = get_secrets_prompt(self.agent)
+ project_prompt = get_project_prompt(self.agent)
system_prompt.append(main)
system_prompt.append(tools)
@@ -20,6 +27,8 @@ async def execute(self, system_prompt: list[str] = [], loop_data: LoopData = Loo
system_prompt.append(mcp_tools)
if secrets_prompt:
system_prompt.append(secrets_prompt)
+ if project_prompt:
+ system_prompt.append(project_prompt)
def get_main_prompt(agent: Agent):
@@ -29,7 +38,7 @@ def get_main_prompt(agent: Agent):
def get_tools_prompt(agent: Agent):
prompt = agent.read_prompt("agent.system.tools.md")
if agent.config.chat_model.vision:
- prompt += '\n\n' + agent.read_prompt("agent.system.tools_vision.md")
+ prompt += "\n\n" + agent.read_prompt("agent.system.tools_vision.md")
return prompt
@@ -37,7 +46,9 @@ def get_mcp_tools_prompt(agent: Agent):
mcp_config = MCPConfig.get_instance()
if mcp_config.servers:
pre_progress = agent.context.log.progress
- agent.context.log.set_progress("Collecting MCP tools") # MCP might be initializing, better inform via progress bar
+ agent.context.log.set_progress(
+ "Collecting MCP tools"
+ ) # MCP might be initializing, better inform via progress bar
tools = MCPConfig.get_instance().get_tools_prompt()
agent.context.log.set_progress(pre_progress) # return original progress
return tools
@@ -47,11 +58,25 @@ def get_mcp_tools_prompt(agent: Agent):
def get_secrets_prompt(agent: Agent):
try:
# Use lazy import to avoid circular dependencies
- from python.helpers.secrets import SecretsManager
- secrets_manager = SecretsManager.get_instance()
+ from python.helpers.secrets import get_secrets_manager
+
+ secrets_manager = get_secrets_manager(agent.context)
secrets = secrets_manager.get_secrets_for_prompt()
vars = get_settings()["variables"]
return agent.read_prompt("agent.system.secrets.md", secrets=secrets, vars=vars)
except Exception as e:
# If secrets module is not available or has issues, return empty string
return ""
+
+
+def get_project_prompt(agent: Agent):
+ result = agent.read_prompt("agent.system.projects.main.md")
+ project_name = agent.context.get_data(projects.CONTEXT_DATA_KEY_PROJECT)
+ if project_name:
+ project_vars = projects.build_system_prompt_vars(project_name)
+ result += "\n\n" + agent.read_prompt(
+ "agent.system.projects.active.md", **project_vars
+ )
+ else:
+ result += "\n\n" + agent.read_prompt("agent.system.projects.inactive.md")
+ return result
diff --git a/python/extensions/system_prompt/_20_behaviour_prompt.py b/python/extensions/system_prompt/_20_behaviour_prompt.py
index 5c451eba74..1650e9ee8c 100644
--- a/python/extensions/system_prompt/_20_behaviour_prompt.py
+++ b/python/extensions/system_prompt/_20_behaviour_prompt.py
@@ -11,7 +11,7 @@ async def execute(self, system_prompt: list[str]=[], loop_data: LoopData = LoopD
system_prompt.insert(0, prompt) #.append(prompt)
def get_custom_rules_file(agent: Agent):
- return memory.get_memory_subdir_abs(agent) + f"/behaviour.md"
+ return files.get_abs_path(memory.get_memory_subdir_abs(agent), "behaviour.md")
def read_rules(agent: Agent):
rules_file = get_custom_rules_file(agent)
diff --git a/python/extensions/tool_execute_after/_10_mask_secrets.py b/python/extensions/tool_execute_after/_10_mask_secrets.py
index 1055364c4b..ae2cdc4efa 100644
--- a/python/extensions/tool_execute_after/_10_mask_secrets.py
+++ b/python/extensions/tool_execute_after/_10_mask_secrets.py
@@ -1,5 +1,5 @@
from python.helpers.extension import Extension
-from python.helpers.secrets import SecretsManager
+from python.helpers.secrets import get_secrets_manager
from python.helpers.tool import Response
@@ -8,5 +8,5 @@ class MaskToolSecrets(Extension):
async def execute(self, response: Response | None = None, **kwargs):
if not response:
return
- secrets_mgr = SecretsManager.get_instance()
+ secrets_mgr = get_secrets_manager(self.agent.context)
response.message = secrets_mgr.mask_values(response.message)
diff --git a/python/extensions/tool_execute_before/_10_unmask_secrets.py b/python/extensions/tool_execute_before/_10_unmask_secrets.py
index 347d67d1b4..9025812291 100644
--- a/python/extensions/tool_execute_before/_10_unmask_secrets.py
+++ b/python/extensions/tool_execute_before/_10_unmask_secrets.py
@@ -1,5 +1,5 @@
from python.helpers.extension import Extension
-from python.helpers.secrets import SecretsManager
+from python.helpers.secrets import get_secrets_manager
class UnmaskToolSecrets(Extension):
@@ -10,7 +10,7 @@ async def execute(self, **kwargs):
if not tool_args:
return
- secrets_mgr = SecretsManager.get_instance()
+ secrets_mgr = get_secrets_manager(self.agent.context)
# Unmask placeholders in args for actual tool execution
for k, v in tool_args.items():
diff --git a/python/extensions/user_message_ui/_10_update_check.py b/python/extensions/user_message_ui/_10_update_check.py
new file mode 100644
index 0000000000..ad967f44d2
--- /dev/null
+++ b/python/extensions/user_message_ui/_10_update_check.py
@@ -0,0 +1,58 @@
+from python.helpers import notification
+from python.helpers.extension import Extension
+from agent import LoopData
+from python.helpers import settings, update_check
+import datetime
+
+
+# check for newer versions of A0 available and send notification
+# check after user message is sent from UI, not API, MCP etc. (user is active and can see the notification)
+# do not check too often, use cooldown
+# do not notify too often unless there's a different notification
+
+last_check = datetime.datetime.fromtimestamp(0)
+check_cooldown_seconds = 60
+last_notification_id = ""
+last_notification_time = datetime.datetime.fromtimestamp(0)
+notification_cooldown_seconds = 60 * 60 * 24
+
+class UpdateCheck(Extension):
+
+ async def execute(self, loop_data: LoopData = LoopData(), text: str = "", **kwargs):
+ try:
+ global last_check, last_notification_id, last_notification_time
+
+ # first check if update check is enabled
+ current_settings = settings.get_settings()
+ if not current_settings["update_check_enabled"]:
+ return
+
+ # check if cooldown has passed
+ if (datetime.datetime.now() - last_check).total_seconds() < check_cooldown_seconds:
+ return
+ last_check = datetime.datetime.now()
+
+ # check for updates
+ version = await update_check.check_version()
+
+ # if the user should update, send notification
+ if notif := version.get("notification"):
+ if notif.get("id") != last_notification_id or (datetime.datetime.now() - last_notification_time).total_seconds() > notification_cooldown_seconds:
+ last_notification_id = notif.get("id")
+ last_notification_time = datetime.datetime.now()
+ self.send_notification(notif)
+ except Exception as e:
+ pass # no need to log if the update server is inaccessible
+
+
+ def send_notification(self, notif):
+ notifs = self.agent.context.get_notification_manager()
+ notifs.send_notification(
+ title=notif.get("title", "Newer version available"),
+ message=notif.get("message", "A newer version of Agent Zero is available. Please update to the latest version."),
+ type=notif.get("type", "info"),
+ detail=notif.get("detail", ""),
+ display_time=notif.get("display_time", 10),
+ group=notif.get("group", "update_check"),
+ priority=notif.get("priority", notification.NotificationPriority.NORMAL),
+ )
diff --git a/python/extensions/util_model_call_before/_10_mask_secrets.py b/python/extensions/util_model_call_before/_10_mask_secrets.py
index beeffe950f..df23ff1f66 100644
--- a/python/extensions/util_model_call_before/_10_mask_secrets.py
+++ b/python/extensions/util_model_call_before/_10_mask_secrets.py
@@ -1,5 +1,5 @@
from python.helpers.extension import Extension
-from python.helpers.secrets import SecretsManager
+from python.helpers.secrets import get_secrets_manager
class MaskToolSecrets(Extension):
@@ -8,7 +8,7 @@ async def execute(self, **kwargs):
# model call data
call_data:dict = kwargs.get("call_data", {})
- secrets_mgr = SecretsManager.get_instance()
+ secrets_mgr = get_secrets_manager(self.agent.context)
# mask system and user message
if system:=call_data.get("system"):
diff --git a/python/helpers/api.py b/python/helpers/api.py
index 5d0e171f58..6c90c6e566 100644
--- a/python/helpers/api.py
+++ b/python/helpers/api.py
@@ -57,7 +57,9 @@ async def handle_request(self, request: Request) -> Response:
PrintStyle().print(f"Error parsing JSON: {str(e)}")
input_data = {}
else:
- input_data = {"data": request.get_data(as_text=True)}
+ # input_data = {"data": request.get_data(as_text=True)}
+ input_data = {}
+
# process via handler
output = await self.process(input_data, request)
@@ -78,14 +80,21 @@ async def handle_request(self, request: Request) -> Response:
return Response(response=error, status=500, mimetype="text/plain")
# get context to run agent zero in
- def get_context(self, ctxid: str):
+ def use_context(self, ctxid: str, create_if_not_exists: bool = True):
with self.thread_lock:
if not ctxid:
first = AgentContext.first()
if first:
+ AgentContext.use(first.id)
return first
- return AgentContext(config=initialize_agent())
- got = AgentContext.get(ctxid)
+ context = AgentContext(config=initialize_agent(), set_current=True)
+ return context
+ got = AgentContext.use(ctxid)
if got:
return got
- return AgentContext(config=initialize_agent(), id=ctxid)
+ if create_if_not_exists:
+ context = AgentContext(config=initialize_agent(), id=ctxid, set_current=True)
+ return context
+ else:
+ raise Exception(f"Context {ctxid} not found")
+
diff --git a/python/helpers/backup.py b/python/helpers/backup.py
index 81f78fe5eb..4e4873371d 100644
--- a/python/helpers/backup.py
+++ b/python/helpers/backup.py
@@ -75,9 +75,14 @@ def _get_default_patterns(self) -> str:
# Configuration and Settings (CRITICAL)
{agent_root}/.env
{agent_root}/tmp/settings.json
+{agent_root}/tmp/secrets.env
{agent_root}/tmp/chats/**
{agent_root}/tmp/scheduler/**
-{agent_root}/tmp/uploads/**"""
+{agent_root}/tmp/uploads/**
+
+# User data
+{agent_root}/usr/**
+"""
def _get_agent_zero_version(self) -> str:
"""Get current Agent Zero version"""
diff --git a/python/helpers/context.py b/python/helpers/context.py
new file mode 100644
index 0000000000..2dc5609f0e
--- /dev/null
+++ b/python/helpers/context.py
@@ -0,0 +1,46 @@
+from contextvars import ContextVar
+from typing import Any, TypeVar, cast, Optional, Dict
+
+T = TypeVar("T")
+
+# no mutable default β None is safe
+_context_data: ContextVar[Optional[Dict[str, Any]]] = ContextVar("_context_data", default=None)
+
+
+def _ensure_context() -> Dict[str, Any]:
+ """Make sure a context dict exists, and return it."""
+ data = _context_data.get()
+ if data is None:
+ data = {}
+ _context_data.set(data)
+ return data
+
+
+def set_context_data(key: str, value: Any):
+ """Set context data for the current async/task context."""
+ data = _ensure_context()
+ if data.get(key) == value:
+ return
+ data[key] = value
+ _context_data.set(data)
+
+
+def delete_context_data(key: str):
+ """Delete a key from the current async/task context."""
+ data = _ensure_context()
+ if key in data:
+ del data[key]
+ _context_data.set(data)
+
+
+def get_context_data(key: Optional[str] = None, default: T = None) -> T:
+ """Get a key from the current context, or the full dict if key is None."""
+ data = _ensure_context()
+ if key is None:
+ return cast(T, data)
+ return cast(T, data.get(key, default))
+
+
+def clear_context_data():
+ """Completely clear the context dict."""
+ _context_data.set({})
diff --git a/python/helpers/defer.py b/python/helpers/defer.py
index dc96efe5aa..8c2c7e86ef 100644
--- a/python/helpers/defer.py
+++ b/python/helpers/defer.py
@@ -6,8 +6,9 @@
T = TypeVar("T")
+
class EventLoopThread:
- _instances = {}
+ _instances: dict[str, "EventLoopThread"] = {}
_lock = threading.Lock()
def __init__(self, thread_name: str = "Background") -> None:
@@ -38,8 +39,29 @@ def _run_event_loop(self):
self.loop.run_forever()
def terminate(self):
- if self.loop and self.loop.is_running():
- self.loop.stop()
+ loop = getattr(self, "loop", None)
+ thread = getattr(self, "thread", None)
+
+ if not loop:
+ return
+
+ if loop.is_running():
+ if thread and thread is threading.current_thread():
+ loop.stop()
+ else:
+ loop.call_soon_threadsafe(loop.stop)
+ if thread:
+ thread.join()
+ elif thread and thread.is_alive() and thread is not threading.current_thread():
+ thread.join()
+
+ if not loop.is_closed():
+ loop.close()
+
+ with self.__class__._lock:
+ if self.thread_name in self.__class__._instances:
+ del self.__class__._instances[self.thread_name]
+
self.loop = None
self.thread = None
@@ -79,6 +101,12 @@ def __del__(self):
def _start_task(self):
self._future = self.event_loop_thread.run_coroutine(self._run())
+ if self._future:
+ self._future.add_done_callback(self._on_task_done)
+
+ def _on_task_done(self, _future: Future):
+ # Ensure child background tasks are always cleaned up once the parent finishes
+ self.kill_children()
async def _run(self):
return await self.func(*self.args, **self.kwargs)
@@ -120,30 +148,16 @@ def kill(self, terminate_thread: bool = False) -> None:
if self._future and not self._future.done():
self._future.cancel()
- if (
- terminate_thread
- and self.event_loop_thread.loop
- and self.event_loop_thread.loop.is_running()
- ):
-
- def cleanup():
- tasks = [
- t
- for t in asyncio.all_tasks(self.event_loop_thread.loop)
- if t is not asyncio.current_task(self.event_loop_thread.loop)
- ]
- for task in tasks:
- task.cancel()
- try:
- # Give tasks a chance to cleanup
- if self.event_loop_thread.loop:
- self.event_loop_thread.loop.run_until_complete(
- asyncio.gather(task, return_exceptions=True)
- )
- except Exception:
- pass # Ignore cleanup errors
-
- self.event_loop_thread.loop.call_soon_threadsafe(cleanup)
+ if terminate_thread and self.event_loop_thread.loop:
+ if self.event_loop_thread.loop.is_running():
+ try:
+ cleanup_future = asyncio.run_coroutine_threadsafe(
+ self._drain_event_loop_tasks(), self.event_loop_thread.loop
+ )
+ cleanup_future.result()
+ except Exception:
+ pass
+
self.event_loop_thread.terminate()
def kill_children(self) -> None:
@@ -196,3 +210,19 @@ async def wrapped():
asyncio.run_coroutine_threadsafe(wrapped(), self.event_loop_thread.loop)
return asyncio.wrap_future(future)
+
+    @staticmethod
+    async def _drain_event_loop_tasks():
+        """Cancel and await all pending tasks on the current event loop.
+
+        Intended to be scheduled onto the target loop (kill() submits it via
+        asyncio.run_coroutine_threadsafe), so get_running_loop() is the loop
+        being drained.
+        """
+        loop = asyncio.get_running_loop()
+        # Exclude this coroutine's own task so it does not cancel itself.
+        current_task = asyncio.current_task(loop=loop)
+        pending = [
+            task
+            for task in asyncio.all_tasks(loop=loop)
+            if task is not current_task
+        ]
+        if not pending:
+            return
+        for task in pending:
+            task.cancel()
+        # return_exceptions=True collects CancelledError results instead of
+        # raising, so draining never propagates an exception.
+        await asyncio.gather(*pending, return_exceptions=True)
diff --git a/python/helpers/document_query.py b/python/helpers/document_query.py
index d60a86574e..6ba38855c6 100644
--- a/python/helpers/document_query.py
+++ b/python/helpers/document_query.py
@@ -361,15 +361,22 @@ def __init__(
self.progress_callback = progress_callback or (lambda x: None)
async def document_qa(
- self, document_uri: str, questions: Sequence[str]
+ self, document_uris: List[str], questions: Sequence[str]
) -> Tuple[bool, str]:
- self.progress_callback(f"Starting Q&A process")
+ self.progress_callback(
+ f"Starting Q&A process for {len(document_uris)} documents"
+ )
+ await self.agent.handle_intervention()
- # index document
- _ = await self.document_get_content(document_uri, True)
+ # index documents
+ await asyncio.gather(
+ *[self.document_get_content(uri, True) for uri in document_uris]
+ )
+ await self.agent.handle_intervention()
selected_chunks = {}
for question in questions:
self.progress_callback(f"Optimizing query: {question}")
+ await self.agent.handle_intervention()
human_content = f'Search Query: "{question}"'
system_content = self.agent.parse_prompt(
"fw.document_query.optmimize_query.md"
@@ -381,14 +388,19 @@ async def document_qa(
)
).strip()
- self.progress_callback(f"Searching document with query: {optimized_query}")
+ await self.agent.handle_intervention()
+ self.progress_callback(f"Searching documents with query: {optimized_query}")
+
+ normalized_uris = [self.store.normalize_uri(uri) for uri in document_uris]
+ doc_filter = " or ".join(
+ [f"document_uri == '{uri}'" for uri in normalized_uris]
+ )
- normalized_uri = self.store.normalize_uri(document_uri)
- chunks = await self.store.search_document(
- document_uri=normalized_uri,
+ chunks = await self.store.search_documents(
query=optimized_query,
limit=100,
threshold=DEFAULT_SEARCH_THRESHOLD,
+ filter=doc_filter,
)
self.progress_callback(f"Found {len(chunks)} chunks")
@@ -397,13 +409,14 @@ async def document_qa(
selected_chunks[chunk.metadata["id"]] = chunk
if not selected_chunks:
- self.progress_callback(f"No relevant content found in the document")
- content = f"!!! No content found for document: {document_uri} matching queries: {json.dumps(questions)}"
+ self.progress_callback("No relevant content found in the documents")
+ content = f"!!! No content found for documents: {json.dumps(document_uris)} matching queries: {json.dumps(questions)}"
return False, content
self.progress_callback(
f"Processing {len(questions)} questions in context of {len(selected_chunks)} chunks"
)
+ await self.agent.handle_intervention()
questions_str = "\n".join([f" * {question}" for question in questions])
content = "\n\n----\n\n".join(
@@ -430,6 +443,7 @@ async def document_get_content(
self, document_uri: str, add_to_db: bool = False
) -> str:
self.progress_callback(f"Fetching document content")
+ await self.agent.handle_intervention()
url = urlparse(document_uri)
scheme = url.scheme or "file"
mimetype, encoding = mimetypes.guess_type(document_uri)
@@ -455,6 +469,7 @@ async def document_get_content(
await asyncio.sleep(1)
last_error = str(e)
retries += 1
+ await self.agent.handle_intervention()
if not response:
raise ValueError(
@@ -492,9 +507,11 @@ async def document_get_content(
# Use the store's normalization method
document_uri_norm = self.store.normalize_uri(document_uri)
+ await self.agent.handle_intervention()
exists = await self.store.document_exists(document_uri_norm)
document_content = ""
if not exists:
+ await self.agent.handle_intervention()
if mimetype.startswith("image/"):
document_content = self.handle_image_document(document_uri, scheme)
elif mimetype == "text/html":
@@ -509,6 +526,7 @@ async def document_get_content(
)
if add_to_db:
self.progress_callback(f"Indexing document")
+ await self.agent.handle_intervention()
success, ids = await self.store.add_document(
document_content, document_uri_norm
)
@@ -519,6 +537,7 @@ async def document_get_content(
)
self.progress_callback(f"Indexed {len(ids)} chunks")
else:
+ await self.agent.handle_intervention()
doc = await self.store.get_document(document_uri_norm)
if doc:
document_content = doc.page_content
diff --git a/python/helpers/email_client.py b/python/helpers/email_client.py
new file mode 100644
index 0000000000..741e008991
--- /dev/null
+++ b/python/helpers/email_client.py
@@ -0,0 +1,587 @@
+import asyncio
+import email
+import os
+import re
+import uuid
+from dataclasses import dataclass
+from email.header import decode_header
+from email.message import Message as EmailMessage
+from fnmatch import fnmatch
+from typing import Any, Dict, List, Optional, Tuple
+
+import html2text
+from bs4 import BeautifulSoup
+from imapclient import IMAPClient
+
+from python.helpers import files
+from python.helpers.errors import RepairableException, format_error
+from python.helpers.print_style import PrintStyle
+
+
+@dataclass
+class Message:
+    """Email message representation with sender, subject, body, and attachments."""
+    # sender/subject are decoded via EmailClient._decode_header; body is plain
+    # text (HTML parts converted with html2text); attachments holds absolute
+    # paths of files saved to the download folder.
+    sender: str
+    subject: str
+    body: str
+    attachments: List[str]
+
+
+class EmailClient:
+    """
+    Async email client for reading messages from IMAP and Exchange servers.
+
+    Blocking protocol work (IMAPClient / exchangelib calls) is executed in the
+    default thread-pool executor so the event loop is never blocked.
+    """
+
+    def __init__(
+        self,
+        account_type: str = "imap",
+        server: str = "",
+        port: int = 993,
+        username: str = "",
+        password: str = "",
+        options: Optional[Dict[str, Any]] = None,
+    ):
+        """
+        Initialize email client with connection parameters.
+
+        Args:
+            account_type: Type of account - "imap" or "exchange"
+            server: Mail server address (e.g., "imap.gmail.com")
+            port: Server port (default 993 for IMAP SSL)
+            username: Email account username
+            password: Email account password
+            options: Optional configuration dict with keys:
+                - ssl: Use SSL/TLS (default: True)
+                - timeout: Connection timeout in seconds (default: 30)
+        """
+        # Normalized to lowercase so "IMAP"/"Exchange" are accepted as well.
+        self.account_type = account_type.lower()
+        self.server = server
+        self.port = port
+        self.username = username
+        self.password = password
+        self.options = options or {}
+
+        # Default options
+        self.ssl = self.options.get("ssl", True)
+        self.timeout = self.options.get("timeout", 30)
+
+        # At most one of these is set after connect(), depending on account_type.
+        self.client: Optional[IMAPClient] = None
+        self.exchange_account = None
+
+    async def connect(self) -> None:
+        """Establish connection to email server.
+
+        Raises:
+            RepairableException: for an unsupported account type or any
+                connection/login failure (original error is chained via
+                ``from e``).
+        """
+        try:
+            if self.account_type == "imap":
+                await self._connect_imap()
+            elif self.account_type == "exchange":
+                await self._connect_exchange()
+            else:
+                raise RepairableException(
+                    f"Unsupported account type: {self.account_type}. "
+                    "Supported types: 'imap', 'exchange'"
+                )
+        except Exception as e:
+            # Every failure — including the unsupported-type error raised just
+            # above — is wrapped so callers deal with one repairable error type.
+            err = format_error(e)
+            PrintStyle.error(f"Failed to connect to email server: {err}")
+            raise RepairableException(f"Email connection failed: {err}") from e
+
+    async def _connect_imap(self) -> None:
+        """Establish IMAP connection; blocking login runs in the executor."""
+        loop = asyncio.get_event_loop()
+
+        def _sync_connect():
+            client = IMAPClient(self.server, port=self.port, ssl=self.ssl, timeout=self.timeout)
+            # Increase line length limit to handle large emails (default is 10000)
+            # This fixes "line too long" errors for emails with large headers or embedded content
+            # NOTE(review): _imap._maxline is a private imaplib attribute —
+            # confirm it still exists on the targeted Python versions.
+            client._imap._maxline = 100000
+            client.login(self.username, self.password)
+            return client
+
+        self.client = await loop.run_in_executor(None, _sync_connect)
+        PrintStyle.standard(f"Connected to IMAP server: {self.server}")
+
+    async def _connect_exchange(self) -> None:
+        """Establish Exchange connection.
+
+        Raises:
+            RepairableException: when the optional exchangelib dependency is
+                not installed.
+        """
+        try:
+            # Imported lazily so IMAP-only deployments don't need exchangelib.
+            from exchangelib import Account, Configuration, Credentials, DELEGATE
+
+            loop = asyncio.get_event_loop()
+
+            def _sync_connect():
+                creds = Credentials(username=self.username, password=self.password)
+                config = Configuration(server=self.server, credentials=creds)
+                # autodiscover=False: connect directly to the configured server.
+                return Account(
+                    primary_smtp_address=self.username,
+                    config=config,
+                    autodiscover=False,
+                    access_type=DELEGATE
+                )
+
+            self.exchange_account = await loop.run_in_executor(None, _sync_connect)
+            PrintStyle.standard(f"Connected to Exchange server: {self.server}")
+        except ImportError as e:
+            raise RepairableException(
+                "exchangelib not installed. Install with: pip install exchangelib>=5.4.3"
+            ) from e
+
+    async def disconnect(self) -> None:
+        """Clean up connection. Best-effort: errors are logged, never raised."""
+        try:
+            if self.client:
+                loop = asyncio.get_event_loop()
+                # logout is blocking, so run it in the executor like connect().
+                await loop.run_in_executor(None, self.client.logout)
+                self.client = None
+                PrintStyle.standard("Disconnected from IMAP server")
+            elif self.exchange_account:
+                # NOTE(review): the account is simply dropped — presumably
+                # exchangelib needs no explicit close; confirm.
+                self.exchange_account = None
+                PrintStyle.standard("Disconnected from Exchange server")
+        except Exception as e:
+            PrintStyle.error(f"Error during disconnect: {format_error(e)}")
+
+    async def read_messages(
+        self,
+        download_folder: str,
+        filter: Optional[Dict[str, Any]] = None,
+    ) -> List[Message]:
+        """
+        Read messages based on filter criteria.
+
+        Args:
+            download_folder: Folder to save attachments (relative to /a0/)
+            filter: Filter criteria dict with keys:
+                - unread: Boolean to filter unread messages (default: True)
+                - sender: Sender pattern with wildcards (e.g., "*@company.com")
+                - subject: Subject pattern with wildcards (e.g., "*invoice*")
+                - since_date: Optional datetime for date filtering
+
+        Returns:
+            List of Message objects with attachments saved to download_folder
+
+        Raises:
+            RepairableException: for an unsupported account type, or from the
+                backend when the client was never connected.
+        """
+        filter = filter or {}
+
+        # Dispatch on account type; each backend applies the filter itself.
+        if self.account_type == "imap":
+            return await self._fetch_imap_messages(download_folder, filter)
+        elif self.account_type == "exchange":
+            return await self._fetch_exchange_messages(download_folder, filter)
+        else:
+            raise RepairableException(f"Unsupported account type: {self.account_type}")
+
+    async def _fetch_imap_messages(
+        self,
+        download_folder: str,
+        filter: Dict[str, Any],
+    ) -> List[Message]:
+        """Fetch messages from IMAP server.
+
+        The server-side search covers unread/since_date; sender/subject
+        patterns are applied per message in _fetch_and_parse_imap_message().
+        """
+        if not self.client:
+            raise RepairableException("IMAP client not connected. Call connect() first.")
+
+        loop = asyncio.get_event_loop()
+        messages: List[Message] = []
+
+        def _sync_fetch():
+            # Select inbox
+            self.client.select_folder("INBOX")
+
+            # Build search criteria
+            search_criteria = []
+            if filter.get("unread", True):
+                search_criteria.append("UNSEEN")
+
+            if filter.get("since_date"):
+                since_date = filter["since_date"]
+                search_criteria.append(["SINCE", since_date])
+
+            # Search for messages
+            if not search_criteria:
+                search_criteria = ["ALL"]
+
+            message_ids = self.client.search(search_criteria)
+            return message_ids
+
+        message_ids = await loop.run_in_executor(None, _sync_fetch)
+
+        if not message_ids:
+            PrintStyle.hint("No messages found matching criteria")
+            return messages
+
+        PrintStyle.standard(f"Found {len(message_ids)} messages")
+
+        # Fetch and process messages
+        for msg_id in message_ids:
+            try:
+                msg = await self._fetch_and_parse_imap_message(msg_id, download_folder, filter)
+                if msg:
+                    messages.append(msg)
+            except Exception as e:
+                # One broken message must not abort the whole batch.
+                PrintStyle.error(f"Error processing message {msg_id}: {format_error(e)}")
+                continue
+
+        return messages
+
+    async def _fetch_and_parse_imap_message(
+        self,
+        msg_id: int,
+        download_folder: str,
+        filter: Dict[str, Any],
+    ) -> Optional[Message]:
+        """Fetch and parse a single IMAP message with retry logic for large messages.
+
+        Returns None when the message is filtered out (sender/subject mismatch)
+        or could not be fetched/parsed; failures are logged, not raised.
+        """
+        loop = asyncio.get_event_loop()
+
+        def _sync_fetch():
+            try:
+                # Try standard RFC822 fetch first
+                return self.client.fetch([msg_id], ["RFC822"])[msg_id]
+            except Exception as e:
+                error_msg = str(e).lower()
+                # If "line too long" error, try fetching in parts
+                if "line too long" in error_msg or "fetch_failed" in error_msg:
+                    PrintStyle.warning(f"Message {msg_id} too large for standard fetch, trying alternative method")
+                    # Fetch headers and body separately to avoid line length issues
+                    try:
+                        envelope = self.client.fetch([msg_id], ["BODY.PEEK[]"])[msg_id]
+                        return envelope
+                    except Exception as e2:
+                        PrintStyle.error(f"Alternative fetch also failed for message {msg_id}: {format_error(e2)}")
+                        raise
+                raise
+
+        try:
+            raw_msg = await loop.run_in_executor(None, _sync_fetch)
+
+            # Extract email data from response
+            if b"RFC822" in raw_msg:
+                email_data = raw_msg[b"RFC822"]
+            elif b"BODY[]" in raw_msg:
+                # BODY.PEEK[] responses come back keyed as BODY[]
+                email_data = raw_msg[b"BODY[]"]
+            else:
+                PrintStyle.error(f"Unexpected response format for message {msg_id}")
+                return None
+
+            email_msg = email.message_from_bytes(email_data)
+
+            # Apply sender filter
+            # NOTE(review): fnmatch runs against the full decoded "From" header,
+            # which may be 'Display Name <user@host>'; a pattern like
+            # "*@company.com" then fails because of the trailing '>'. Confirm
+            # whether matching the bare address was intended.
+            sender = self._decode_header(email_msg.get("From", ""))
+            if filter.get("sender") and not fnmatch(sender, filter["sender"]):
+                return None
+
+            # Apply subject filter
+            subject = self._decode_header(email_msg.get("Subject", ""))
+            if filter.get("subject") and not fnmatch(subject, filter["subject"]):
+                return None
+
+            # Parse message
+            return await self._parse_message(email_msg, download_folder)
+
+        except Exception as e:
+            PrintStyle.error(f"Failed to fetch/parse message {msg_id}: {format_error(e)}")
+            return None
+
+    async def _fetch_exchange_messages(
+        self,
+        download_folder: str,
+        filter: Dict[str, Any],
+    ) -> List[Message]:
+        """Fetch messages from Exchange server.
+
+        Wildcard patterns are approximated: '*' is stripped and the remainder
+        is used as a substring ('contains') query.
+        NOTE(review): the 'since_date' filter is not applied on this code path
+        (unlike IMAP) — confirm whether that is intended.
+        """
+        if not self.exchange_account:
+            raise RepairableException("Exchange account not connected. Call connect() first.")
+
+        from exchangelib import Q
+
+        loop = asyncio.get_event_loop()
+        messages: List[Message] = []
+
+        def _sync_fetch():
+            # Build query
+            query = None
+            if filter.get("unread", True):
+                query = Q(is_read=False)
+
+            if filter.get("sender"):
+                # Strip glob wildcards; only substring matching is used here.
+                sender_pattern = filter["sender"].replace("*", "")
+                sender_q = Q(sender__contains=sender_pattern)
+                query = query & sender_q if query else sender_q
+
+            if filter.get("subject"):
+                subject_pattern = filter["subject"].replace("*", "")
+                subject_q = Q(subject__contains=subject_pattern)
+                query = query & subject_q if query else subject_q
+
+            # Fetch messages from inbox
+            inbox = self.exchange_account.inbox
+            items = inbox.filter(query) if query else inbox.all()
+            # Materialize inside the executor so the EWS I/O stays off the loop.
+            return list(items)
+
+        exchange_messages = await loop.run_in_executor(None, _sync_fetch)
+
+        PrintStyle.standard(f"Found {len(exchange_messages)} Exchange messages")
+
+        # Process messages
+        for ex_msg in exchange_messages:
+            try:
+                msg = await self._parse_exchange_message(ex_msg, download_folder)
+                if msg:
+                    messages.append(msg)
+            except Exception as e:
+                # Per-message errors are logged and skipped, like the IMAP path.
+                PrintStyle.error(f"Error processing Exchange message: {format_error(e)}")
+                continue
+
+        return messages
+
+    async def _parse_exchange_message(
+        self,
+        ex_msg,
+        download_folder: str,
+    ) -> Message:
+        """Parse an Exchange message into a Message, saving its attachments."""
+        loop = asyncio.get_event_loop()
+
+        def _get_body():
+            # Run in executor: body access may perform blocking I/O in exchangelib.
+            return str(ex_msg.text_body or ex_msg.body or "")
+
+        body = await loop.run_in_executor(None, _get_body)
+
+        # Process HTML if present
+        # (heuristic: a body starting with '<' is treated as HTML)
+        if ex_msg.body and str(ex_msg.body).strip().startswith("<"):
+            body = self._html_to_text(str(ex_msg.body))
+
+        # Save attachments
+        attachment_paths = []
+        if ex_msg.attachments:
+            for attachment in ex_msg.attachments:
+                # NOTE(review): only attachments exposing .content are saved;
+                # others (e.g. attached emails) appear to be skipped — confirm.
+                if hasattr(attachment, "content"):
+                    path = await self._save_attachment_bytes(
+                        attachment.name,
+                        attachment.content,
+                        download_folder
+                    )
+                    attachment_paths.append(path)
+
+        return Message(
+            sender=str(ex_msg.sender.email_address) if ex_msg.sender else "",
+            subject=str(ex_msg.subject or ""),
+            body=body,
+            attachments=attachment_paths
+        )
+
+    async def _parse_message(
+        self,
+        email_msg: EmailMessage,
+        download_folder: str,
+    ) -> Message:
+        """
+        Parse email message and extract content with inline attachments.
+
+        Processes multipart messages, converts HTML to text, and maintains
+        positional context for inline attachments.
+        """
+        sender = self._decode_header(email_msg.get("From", ""))
+        subject = self._decode_header(email_msg.get("Subject", ""))
+
+        # Extract body and attachments
+        body = ""
+        attachment_paths: List[str] = []
+        cid_map: Dict[str, str] = {}  # Map Content-ID to file paths
+        body_parts: List[str] = []  # Track parts in order
+
+        if email_msg.is_multipart():
+            # Process parts in order to maintain attachment positions
+            for part in email_msg.walk():
+                content_type = part.get_content_type()
+                content_disposition = str(part.get("Content-Disposition", ""))
+
+                # Skip multipart containers
+                if part.get_content_maintype() == "multipart":
+                    continue
+
+                # Handle attachments
+                if "attachment" in content_disposition or part.get("Content-ID"):
+                    filename = part.get_filename()
+                    if filename:
+                        filename = self._decode_header(filename)
+                        content = part.get_payload(decode=True)
+                        if content:
+                            path = await self._save_attachment_bytes(
+                                filename, content, download_folder
+                            )
+                            attachment_paths.append(path)
+
+                            # Map Content-ID for inline images
+                            cid = part.get("Content-ID")
+                            if cid:
+                                cid = cid.strip("<>")
+                                cid_map[cid] = path
+
+                            # Add positional marker for non-cid attachments
+                            # (cid attachments are positioned via HTML references)
+                            if not cid and body_parts:
+                                body_parts.append(f"\n[file://{path}]\n")
+
+                # Handle body text
+                elif content_type == "text/plain":
+                    if not body:  # Use first text/plain as primary body
+                        charset = part.get_content_charset() or "utf-8"
+                        # NOTE(review): get_payload(decode=True) can return None for
+                        # some malformed parts, which would raise here — confirm.
+                        body = part.get_payload(decode=True).decode(charset, errors="ignore")
+                        body_parts.append(body)
+
+                elif content_type == "text/html":
+                    if not body:  # Use first text/html as primary body if no text/plain
+                        charset = part.get_content_charset() or "utf-8"
+                        html_content = part.get_payload(decode=True).decode(charset, errors="ignore")
+                        body = self._html_to_text(html_content, cid_map)
+                        body_parts.append(body)
+
+            # Combine body parts if we built them up
+            # (more than one entry means positional [file://...] markers were
+            # appended after the primary body; extra text parts are skipped above)
+            if len(body_parts) > 1:
+                body = "".join(body_parts)
+        else:
+            # Single part message
+            content_type = email_msg.get_content_type()
+            charset = email_msg.get_content_charset() or "utf-8"
+            content = email_msg.get_payload(decode=True)
+            if content:
+                if content_type == "text/html":
+                    body = self._html_to_text(content.decode(charset, errors="ignore"), cid_map)
+                else:
+                    body = content.decode(charset, errors="ignore")
+
+        return Message(
+            sender=sender,
+            subject=subject,
+            body=body,
+            attachments=attachment_paths
+        )
+
+    def _html_to_text(self, html_content: str, cid_map: Optional[Dict[str, str]] = None) -> str:
+        """
+        Convert HTML to plain text with inline attachment references.
+
+        Replaces inline images with [file:///a0/...] markers to maintain
+        positional context.
+
+        Args:
+            html_content: Raw HTML markup.
+            cid_map: Optional mapping of Content-ID -> saved attachment path,
+                used to rewrite inline "cid:" image references.
+        """
+        cid_map = cid_map or {}
+
+        # Replace cid: references with file paths before conversion
+        if cid_map:
+            soup = BeautifulSoup(html_content, "html.parser")
+            for img in soup.find_all("img"):
+                src = img.get("src", "")
+                if src.startswith("cid:"):
+                    cid = src[4:]  # Remove "cid:" prefix
+                    if cid in cid_map:
+                        # Replace with file path marker
+                        file_marker = f"[file://{cid_map[cid]}]"
+                        img.replace_with(soup.new_string(file_marker))
+            html_content = str(soup)
+
+        # Convert HTML to text
+        h = html2text.HTML2Text()
+        h.ignore_links = False
+        h.ignore_images = False
+        h.ignore_emphasis = False
+        h.body_width = 0  # Don't wrap lines
+
+        text = h.handle(html_content)
+
+        # Clean up extra whitespace
+        text = re.sub(r"\n{3,}", "\n\n", text)  # Max 2 consecutive newlines
+        text = text.strip()
+
+        return text
+
+    async def _save_attachment_bytes(
+        self,
+        filename: str,
+        content: bytes,
+        download_folder: str,
+    ) -> str:
+        """
+        Save attachment to disk and return absolute path.
+
+        Uses Agent Zero's file helpers for path management.
+        NOTE(review): declared async but performs the file write directly
+        (no await) — the write blocks the event loop for large attachments.
+        """
+        # Sanitize filename
+        filename = files.safe_file_name(filename)
+
+        # Generate unique filename if needed
+        # (an 8-char uuid suffix prevents collisions between same-named attachments)
+        unique_id = uuid.uuid4().hex[:8]
+        name, ext = os.path.splitext(filename)
+        unique_filename = f"{name}_{unique_id}{ext}"
+
+        # Build relative path and save
+        relative_path = os.path.join(download_folder, unique_filename)
+        files.write_file_bin(relative_path, content)
+
+        # Return absolute path
+        abs_path = files.get_abs_path(relative_path)
+        return abs_path
+
+    def _decode_header(self, header: str) -> str:
+        """Decode email header handling various encodings (RFC 2047 encoded words)."""
+        if not header:
+            return ""
+
+        decoded_parts = []
+        for part, encoding in decode_header(header):
+            if isinstance(part, bytes):
+                # Fall back to UTF-8 when the encoded word declares no charset.
+                decoded_parts.append(part.decode(encoding or "utf-8", errors="ignore"))
+            else:
+                decoded_parts.append(str(part))
+
+        return " ".join(decoded_parts)
+
+
+async def read_messages(
+    account_type: str = "imap",
+    server: str = "",
+    port: int = 993,
+    username: str = "",
+    password: str = "",
+    download_folder: str = "tmp/email",
+    options: Optional[Dict[str, Any]] = None,
+    filter: Optional[Dict[str, Any]] = None,
+) -> List[Message]:
+    """
+    Convenience wrapper for reading email messages.
+
+    Automatically handles connection and disconnection.
+
+    Args:
+        account_type: "imap" or "exchange"
+        server: Mail server address
+        port: Server port (default 993 for IMAP SSL)
+        username: Email username
+        password: Email password
+        download_folder: Folder to save attachments (relative to /a0/)
+        options: Optional configuration dict
+        filter: Filter criteria dict
+
+    Returns:
+        List of Message objects
+
+    Example:
+        from python.helpers.email_client import read_messages
+        messages = await read_messages(
+            server="imap.gmail.com",
+            port=993,
+            username=secrets.get("EMAIL_USER"),
+            password=secrets.get("EMAIL_PASSWORD"),
+            download_folder="tmp/email/inbox",
+            filter={"unread": True, "sender": "*@company.com"}
+        )
+    """
+    client = EmailClient(
+        account_type=account_type,
+        server=server,
+        port=port,
+        username=username,
+        password=password,
+        options=options,
+    )
+
+    try:
+        await client.connect()
+        messages = await client.read_messages(download_folder, filter)
+        return messages
+    finally:
+        # Always disconnect, even when connect() or read_messages() raises.
+        await client.disconnect()
diff --git a/python/helpers/extension.py b/python/helpers/extension.py
index 5c12d48066..186099cc02 100644
--- a/python/helpers/extension.py
+++ b/python/helpers/extension.py
@@ -1,14 +1,22 @@
from abc import abstractmethod
from typing import Any
-from python.helpers import extract_tools, files
+from python.helpers import extract_tools, files
from typing import TYPE_CHECKING
+
if TYPE_CHECKING:
from agent import Agent
+
+DEFAULT_EXTENSIONS_FOLDER = "python/extensions"
+USER_EXTENSIONS_FOLDER = "usr/extensions"
+
+_cache: dict[str, list[type["Extension"]]] = {}
+
+
class Extension:
def __init__(self, agent: "Agent|None", **kwargs):
- self.agent: "Agent" = agent # type: ignore < here we ignore the type check as there are currently no extensions without an agent
+ self.agent: "Agent" = agent # type: ignore < here we ignore the type check as there are currently no extensions without an agent
self.kwargs = kwargs
@abstractmethod
@@ -16,25 +24,26 @@ async def execute(self, **kwargs) -> Any:
pass
-async def call_extensions(extension_point: str, agent: "Agent|None" = None, **kwargs) -> Any:
-
- # get default extensions
- defaults = await _get_extensions("python/extensions/" + extension_point)
- classes = defaults
+async def call_extensions(
+ extension_point: str, agent: "Agent|None" = None, **kwargs
+) -> Any:
+ from python.helpers import projects, subagents
- # get agent extensions
- if agent and agent.config.profile:
- agentics = await _get_extensions("agents/" + agent.config.profile + "/extensions/" + extension_point)
- if agentics:
- # merge them, agentics overwrite defaults
- unique = {}
- for cls in defaults + agentics:
- unique[_get_file_from_module(cls.__module__)] = cls
+ # search for extension folders in all agent's paths
+ paths = subagents.get_paths(agent, "extensions", extension_point, default_root="python")
+ all_exts = [cls for path in paths for cls in _get_extensions(path)]
- # sort by name
- classes = sorted(unique.values(), key=lambda cls: _get_file_from_module(cls.__module__))
+ # merge: first ocurrence of file name is the override
+ unique = {}
+ for cls in all_exts:
+ file = _get_file_from_module(cls.__module__)
+ if file not in unique:
+ unique[file] = cls
+ classes = sorted(
+ unique.values(), key=lambda cls: _get_file_from_module(cls.__module__)
+ )
- # call extensions
+ # execute unique extensions
for cls in classes:
await cls(agent=agent).execute(**kwargs)
@@ -42,8 +51,8 @@ async def call_extensions(extension_point: str, agent: "Agent|None" = None, **kw
def _get_file_from_module(module_name: str) -> str:
return module_name.split(".")[-1]
-_cache: dict[str, list[type[Extension]]] = {}
-async def _get_extensions(folder:str):
+
+def _get_extensions(folder: str):
global _cache
folder = files.get_abs_path(folder)
if folder in _cache:
@@ -51,10 +60,7 @@ async def _get_extensions(folder:str):
else:
if not files.exists(folder):
return []
- classes = extract_tools.load_classes_from_folder(
- folder, "*", Extension
- )
+ classes = extract_tools.load_classes_from_folder(folder, "*", Extension)
_cache[folder] = classes
return classes
-
diff --git a/python/helpers/fasta2a_server.py b/python/helpers/fasta2a_server.py
index d058d1b683..000c5a39df 100644
--- a/python/helpers/fasta2a_server.py
+++ b/python/helpers/fasta2a_server.py
@@ -6,7 +6,7 @@
import contextlib
import threading
-from python.helpers import settings
+from python.helpers import settings, projects
from starlette.requests import Request
# Local imports
@@ -84,6 +84,14 @@ async def run_task(self, params: Any) -> None: # params: TaskSendParams
cfg = initialize_agent()
context = AgentContext(cfg, type=AgentContextType.BACKGROUND)
+ # Retrieve project from message.metadata (standard A2A pattern)
+ metadata = message.get('metadata', {}) or {}
+ project_name = metadata.get('project')
+
+ # Activate project if specified
+ if project_name:
+ projects.activate_project(context.id, project_name)
+
# Log user message so it appears instantly in UI chat window
context.log.log(
type="user", # type: ignore[arg-type]
@@ -424,6 +432,9 @@ async def __call__(self, scope, receive, send):
if path.startswith('/a2a'):
path = path[4:] # Remove '/a2a' prefix
+ # Initialize project name
+ project_name = None
+
# Check if path matches token pattern /t-{token}/
if path.startswith('/t-'):
# Extract token from path
@@ -431,6 +442,14 @@ async def __call__(self, scope, receive, send):
path_parts = path[3:].split('/', 1) # Remove '/t-' prefix
request_token = path_parts[0]
remaining_path = '/' + path_parts[1] if len(path_parts) > 1 else '/'
+
+ # Check for project pattern /p-{project}/
+ if remaining_path.startswith('/p-'):
+ project_parts = remaining_path[3:].split('/', 1)
+ if project_parts[0]:
+ project_name = project_parts[0]
+ remaining_path = '/' + project_parts[1] if len(project_parts) > 1 else '/'
+ _PRINTER.print(f"[A2A] Extracted project from URL: {project_name}")
else:
request_token = path[3:]
remaining_path = '/'
@@ -452,6 +471,54 @@ async def __call__(self, scope, receive, send):
})
return
+ # If project specified, inject it into the request payload
+ if project_name:
+ # Buffer messages and modify before returning the complete body
+ received_messages = []
+ body_modified = False
+ original_receive = receive
+
+ async def receive_wrapper():
+ nonlocal body_modified
+
+ # Receive and buffer the next message
+ message = await original_receive()
+ received_messages.append(message)
+
+ # When we get the complete body, inject project into JSON
+ if message['type'] == 'http.request' and not message.get('more_body', False) and not body_modified:
+ body_modified = True
+ try:
+ import json
+ # Reconstruct full body from all buffered messages
+ body_parts = [msg.get('body', b'') for msg in received_messages if msg['type'] == 'http.request']
+ full_body = b''.join(body_parts)
+ data = json.loads(full_body)
+
+ # INJECT project into message.metadata (standard A2A pattern)
+ if 'params' in data and 'message' in data['params']:
+ msg_data = data['params']['message']
+ # Initialize metadata if it doesn't exist
+ if 'metadata' not in msg_data or msg_data['metadata'] is None:
+ msg_data['metadata'] = {}
+ msg_data['metadata']['project'] = project_name
+
+ # Serialize back to JSON
+ modified_body = json.dumps(data).encode('utf-8')
+
+ # Return modified message IMMEDIATELY (before FastA2A processes it)
+ return {
+ 'type': 'http.request',
+ 'body': modified_body,
+ 'more_body': False
+ }
+ except Exception as e:
+ _PRINTER.print(f"[A2A] Failed to inject project into payload: {e}")
+
+ return message
+
+ receive = receive_wrapper
+
# Update scope with cleaned path
scope = dict(scope)
scope['path'] = remaining_path
diff --git a/python/helpers/file_tree.py b/python/helpers/file_tree.py
new file mode 100644
index 0000000000..c53dbcda56
--- /dev/null
+++ b/python/helpers/file_tree.py
@@ -0,0 +1,660 @@
+from __future__ import annotations
+
+from collections import deque
+from dataclasses import dataclass
+from datetime import datetime, timezone
+import os
+from typing import Any, Callable, Iterable, Literal, Optional, Sequence
+
+from pathspec import PathSpec
+
+from python.helpers.files import get_abs_path
+
+SORT_BY_NAME = "name"
+SORT_BY_CREATED = "created"
+SORT_BY_MODIFIED = "modified"
+
+SORT_ASC = "asc"
+SORT_DESC = "desc"
+
+OUTPUT_MODE_STRING = "string"
+OUTPUT_MODE_FLAT = "flat"
+OUTPUT_MODE_NESTED = "nested"
+
+
+def file_tree(
+ relative_path: str,
+ *,
+ max_depth: int = 0,
+ max_lines: int = 0,
+ folders_first: bool = True,
+ max_folders: int = 0,
+ max_files: int = 0,
+ sort: tuple[Literal["name", "created", "modified"], Literal["asc", "desc"]] = ("modified", "desc"),
+ ignore: str | None = None,
+ output_mode: Literal["string", "flat", "nested"] = OUTPUT_MODE_STRING,
+) -> str | list[dict]:
+ """Render a directory tree relative to the repository base path.
+
+ Parameters:
+ relative_path: Base directory (relative to project root) to scan with :func:`get_abs_path`.
+ max_depth: Maximum depth of traversal (0 = unlimited). Depth starts at 1 for root entries.
+ max_lines: Global limit for rendered lines (0 = unlimited). When exceeded, the current depth
+ finishes rendering before deeper levels are skipped.
+ folders_first: When True, folders render before files within each directory.
+        max_folders: Optional per-directory cap (0 = unlimited) on rendered folder entries before adding a
+            ``# N more folders`` comment. Any overflow, including a single excess folder, is summarized
+            in that comment (``# 1 more folder`` when exactly one entry is hidden).
+        max_files: Optional per-directory cap (0 = unlimited) on rendered file entries before adding a ``# N more files`` comment.
+            Overflowing files are summarized the same way (``# 1 more file`` for a single excess file).
+ sort: Tuple of ``(key, direction)`` where key is one of :data:`SORT_BY_NAME`,
+ :data:`SORT_BY_CREATED`, or :data:`SORT_BY_MODIFIED`; direction is :data:`SORT_ASC`
+ or :data:`SORT_DESC`.
+ ignore: Inline ``.gitignore`` content or ``file:`` reference. Examples::
+
+ ignore=\"\"\"\\n*.pyc\\n__pycache__/\\n!important.py\\n\"\"\"
+ ignore=\"file:.gitignore\" # relative to scan root
+ ignore=\"file://.gitignore\" # URI-style relative path
+ ignore=\"file:/abs/path/.gitignore\"
+ ignore=\"file:///abs/path/.gitignore\"
+
+ output_mode: One of :data:`OUTPUT_MODE_STRING`, :data:`OUTPUT_MODE_FLAT`, or
+ :data:`OUTPUT_MODE_NESTED`.
+
+ Returns:
+ ``OUTPUT_MODE_STRING`` β ``str``: multi-line ASCII tree.
+ ``OUTPUT_MODE_FLAT`` β ``list[dict]``: flattened sequence of TreeItem dictionaries.
+ ``OUTPUT_MODE_NESTED`` β ``list[dict]``: nested TreeItem dictionaries where folders
+ include ``items`` arrays.
+
+ Notes:
+ * The utility is synchronous; avoid calling from latency-sensitive async loops.
+ * The ASCII renderer walks the established tree depth-first so connectors reflect parent/child structure,
+ while traversal and limit calculations remain breadth-first by depth. When ``max_lines`` is set, the number
+ of non-comment entries (excluding the root banner) never exceeds that limit; informational summary comments
+ are emitted in addition when necessary.
+ * ``created`` and ``modified`` values in structured outputs are timezone-aware UTC
+ :class:`datetime.datetime` objects::
+
+ item = flat_items[0]
+ iso = item[\"created\"].isoformat()
+ epoch = item[\"created\"].timestamp()
+
+ """
+ abs_root = get_abs_path(relative_path)
+
+ if not os.path.exists(abs_root):
+ raise FileNotFoundError(f"Path does not exist: {relative_path!r}")
+ if not os.path.isdir(abs_root):
+ raise NotADirectoryError(f"Expected a directory, received: {relative_path!r}")
+
+ sort_key, sort_direction = sort
+ if sort_key not in {SORT_BY_NAME, SORT_BY_CREATED, SORT_BY_MODIFIED}:
+ raise ValueError(f"Unsupported sort key: {sort_key!r}")
+ if sort_direction not in {SORT_ASC, SORT_DESC}:
+ raise ValueError(f"Unsupported sort direction: {sort_direction!r}")
+ if output_mode not in {OUTPUT_MODE_STRING, OUTPUT_MODE_FLAT, OUTPUT_MODE_NESTED}:
+ raise ValueError(f"Unsupported output mode: {output_mode!r}")
+ if max_depth < 0:
+ raise ValueError("max_depth must be >= 0")
+ if max_lines < 0:
+ raise ValueError("max_lines must be >= 0")
+
+ ignore_spec = _resolve_ignore_patterns(ignore, abs_root)
+
+ root_stat = os.stat(abs_root, follow_symlinks=False)
+ root_name = os.path.basename(os.path.normpath(abs_root)) or os.path.basename(abs_root)
+ root_node = _TreeEntry(
+ name=root_name,
+ level=0,
+ item_type="folder",
+ created=datetime.fromtimestamp(root_stat.st_ctime, tz=timezone.utc),
+ modified=datetime.fromtimestamp(root_stat.st_mtime, tz=timezone.utc),
+ parent=None,
+ items=[],
+ rel_path="",
+ )
+
+ queue: deque[tuple[_TreeEntry, str, int]] = deque([(root_node, abs_root, 1)])
+ nodes_in_order: list[_TreeEntry] = []
+ rendered_count = 0
+ limit_reached = False
+ visibility_cache: dict[str, bool] = {}
+
+ def make_entry(entry: os.DirEntry, parent: _TreeEntry, level: int, item_type: Literal["file", "folder"]) -> _TreeEntry:
+ stat = entry.stat(follow_symlinks=False)
+ rel_path = os.path.relpath(entry.path, abs_root)
+ rel_posix = _normalize_relative_path(rel_path)
+ return _TreeEntry(
+ name=entry.name,
+ level=level,
+ item_type=item_type,
+ created=datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc),
+ modified=datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc),
+ parent=parent,
+ items=[] if item_type == "folder" else None,
+ rel_path=rel_posix,
+ )
+
+ while queue and not limit_reached:
+ parent_node, current_dir, level = queue.popleft()
+
+ if max_depth and level > max_depth:
+ continue
+
+ remaining_depth = max_depth - level if max_depth else -1
+ folders, files = _list_directory_children(
+ current_dir,
+ abs_root,
+ ignore_spec,
+ max_depth_remaining=remaining_depth,
+ cache=visibility_cache,
+ )
+
+ folder_entries = [make_entry(folder, parent_node, level, "folder") for folder in folders]
+ file_entries = [make_entry(file_entry, parent_node, level, "file") for file_entry in files]
+
+ children = _apply_sorting_and_limits(
+ folder_entries,
+ file_entries,
+ folders_first=folders_first,
+ sort=sort,
+ max_folders=max_folders,
+ max_files=max_files,
+ directory_node=parent_node,
+ )
+
+ trimmed_children: list[_TreeEntry] = []
+ hidden_children_local: list[_TreeEntry] = []
+ if max_lines and rendered_count >= max_lines:
+ limit_reached = True
+ hidden_children_local = children
+ else:
+ for index, child in enumerate(children):
+ if max_lines and rendered_count >= max_lines:
+ limit_reached = True
+ hidden_children_local = children[index:]
+ break
+ trimmed_children.append(child)
+ nodes_in_order.append(child)
+ is_global_summary = (
+ child.item_type == "comment"
+ and child.rel_path.endswith("#summary:limit")
+ )
+ if not is_global_summary:
+ rendered_count += 1
+ if limit_reached and hidden_children_local:
+ summary = _create_global_limit_comment(
+ parent_node,
+ hidden_children_local,
+ )
+ trimmed_children.append(summary)
+ nodes_in_order.append(summary)
+
+ parent_node.items = trimmed_children or None
+
+ if limit_reached:
+ break
+
+ for child in trimmed_children:
+ if child.item_type != "folder":
+ continue
+ if max_depth and level >= max_depth:
+ continue
+ child_abs = os.path.join(current_dir, child.name)
+ queue.append((child, child_abs, level + 1))
+
+ remaining_queue = list(queue) if limit_reached else []
+ queue.clear()
+
+ if limit_reached and remaining_queue:
+ for folder_node, folder_path, _ in remaining_queue:
+ summary = _create_folder_unprocessed_comment(
+ folder_node,
+ folder_path,
+ abs_root,
+ ignore_spec,
+ )
+ if summary is None:
+ continue
+ folder_node.items = (folder_node.items or []) + [summary]
+ nodes_in_order.append(summary)
+
+ visible_nodes = nodes_in_order
+
+ visible_ids = {id(node) for node in visible_nodes}
+ if visible_ids:
+ _prune_to_visible(root_node, visible_ids)
+
+ _mark_last_flags(root_node)
+ _refresh_render_metadata(root_node)
+
+ def iter_visible() -> Iterable[_TreeEntry]:
+ for node in _iter_depth_first(root_node.items or []):
+ if not visible_ids or id(node) in visible_ids:
+ yield node
+
+ if output_mode == OUTPUT_MODE_STRING:
+ display_name = relative_path.strip() or root_name
+ root_line = f"{display_name.rstrip(os.sep)}/"
+ lines = [root_line]
+ for node in iter_visible():
+ lines.append(node.text)
+ return "\n".join(lines)
+
+ if output_mode == OUTPUT_MODE_FLAT:
+ return _build_tree_items_flat(list(iter_visible()))
+
+ return _to_nested_structure(root_node.items or [])
+
+
+@dataclass(slots=True)
+class _TreeEntry:
+ name: str
+ level: int
+ item_type: Literal["file", "folder", "comment"]
+ created: datetime
+ modified: datetime
+ parent: Optional["_TreeEntry"] = None
+ items: Optional[list["_TreeEntry"]] = None
+ is_last: bool = False
+ rel_path: str = ""
+ text: str = ""
+
+ def as_dict(self) -> dict[str, Any]:
+ return {
+ "name": self.name,
+ "level": self.level,
+ "type": self.item_type,
+ "created": self.created,
+ "modified": self.modified,
+ "text": self.text,
+ "items": [child.as_dict() for child in self.items] if self.items is not None else None,
+ }
+
+
+def _normalize_relative_path(path: str) -> str:
+ normalized = path.replace(os.sep, "/")
+ if normalized in {".", ""}:
+ return ""
+ while normalized.startswith("./"):
+ normalized = normalized[2:]
+ return normalized
+
+
+def _directory_has_visible_entries(
+ directory: str,
+ root_abs_path: str,
+ ignore_spec: PathSpec,
+ cache: dict[str, bool],
+ max_depth_remaining: int,
+) -> bool:
+ if max_depth_remaining == 0:
+ return False
+
+ cached = cache.get(directory)
+ if cached is not None:
+ return cached
+
+ try:
+ with os.scandir(directory) as iterator:
+ for entry in iterator:
+ rel_path = os.path.relpath(entry.path, root_abs_path)
+ rel_posix = _normalize_relative_path(rel_path)
+ is_dir = entry.is_dir(follow_symlinks=False)
+
+ if is_dir:
+ ignored = ignore_spec.match_file(rel_posix) or ignore_spec.match_file(f"{rel_posix}/")
+ if ignored:
+ next_depth = max_depth_remaining - 1 if max_depth_remaining > 0 else -1
+ if next_depth == 0:
+ continue
+ if _directory_has_visible_entries(
+ entry.path,
+ root_abs_path,
+ ignore_spec,
+ cache,
+ next_depth,
+ ):
+ cache[directory] = True
+ return True
+ continue
+ else:
+ if ignore_spec.match_file(rel_posix):
+ continue
+
+ cache[directory] = True
+ return True
+ except FileNotFoundError:
+ cache[directory] = False
+ return False
+
+ cache[directory] = False
+ return False
+
+
+def _create_summary_comment(parent: _TreeEntry, noun: str, count: int) -> _TreeEntry:
+ label = noun
+ if count == 1 and noun.endswith("s"):
+ label = noun[:-1]
+ elif count > 1 and not noun.endswith("s"):
+ label = f"{noun}s"
+ return _TreeEntry(
+ name=f"{count} more {label}",
+ level=parent.level + 1,
+ item_type="comment",
+ created=parent.created,
+ modified=parent.modified,
+ parent=parent,
+ items=None,
+ rel_path=f"{parent.rel_path}#summary:{noun}:{count}",
+ )
+
+
+def _create_global_limit_comment(parent: _TreeEntry, hidden_children: Sequence[_TreeEntry]) -> _TreeEntry:
+ folders = sum(1 for child in hidden_children if child.item_type == "folder")
+ files = sum(1 for child in hidden_children if child.item_type == "file")
+ parts: list[str] = []
+ if folders:
+ label = "folder" if folders == 1 else "folders"
+ parts.append(f"{folders} {label}")
+ if files:
+ label = "file" if files == 1 else "files"
+ parts.append(f"{files} {label}")
+ if not parts:
+ remaining = len(hidden_children)
+ label = "item" if remaining == 1 else "items"
+ parts.append(f"{remaining} {label}")
+ label_text = ", ".join(parts)
+ return _TreeEntry(
+ name=f"limit reached β hidden: {label_text}",
+ level=parent.level + 1,
+ item_type="comment",
+ created=parent.created,
+ modified=parent.modified,
+ parent=parent,
+ items=None,
+ rel_path=f"{parent.rel_path}#summary:limit",
+ )
+
+
+def _create_folder_unprocessed_comment(
+ folder_node: _TreeEntry,
+ folder_path: str,
+ abs_root: str,
+ ignore_spec: Optional[PathSpec],
+) -> Optional[_TreeEntry]:
+ try:
+ folders, files = _list_directory_children(
+ folder_path,
+ abs_root,
+ ignore_spec,
+ max_depth_remaining=-1,
+ cache={},
+ )
+ except FileNotFoundError:
+ return None
+
+ hidden_entries: list[_TreeEntry] = []
+ for entry in folders:
+ stat = entry.stat(follow_symlinks=False)
+ hidden_entries.append(
+ _TreeEntry(
+ name=entry.name,
+ level=folder_node.level + 1,
+ item_type="folder",
+ created=datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc),
+ modified=datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc),
+ parent=folder_node,
+ items=None,
+ rel_path=os.path.join(folder_node.rel_path, entry.name),
+ )
+ )
+ for entry in files:
+ stat = entry.stat(follow_symlinks=False)
+ hidden_entries.append(
+ _TreeEntry(
+ name=entry.name,
+ level=folder_node.level + 1,
+ item_type="file",
+ created=datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc),
+ modified=datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc),
+ parent=folder_node,
+ items=None,
+ rel_path=os.path.join(folder_node.rel_path, entry.name),
+ )
+ )
+
+ if not hidden_entries:
+ return None
+
+ return _create_global_limit_comment(folder_node, hidden_entries)
+
+
+def _prune_to_visible(node: _TreeEntry, visible_ids: set[int]) -> None:
+ if node.items is None:
+ return
+ filtered: list[_TreeEntry] = []
+ for child in node.items:
+ if not visible_ids or id(child) in visible_ids:
+ _prune_to_visible(child, visible_ids)
+ filtered.append(child)
+ node.items = filtered or None
+
+
+def _mark_last_flags(node: _TreeEntry) -> None:
+ if node.items is None:
+ return
+ total = len(node.items)
+ for index, child in enumerate(node.items):
+ child.is_last = index == total - 1
+ _mark_last_flags(child)
+
+
+def _refresh_render_metadata(node: _TreeEntry) -> None:
+ if node.items is None:
+ return
+ for child in node.items:
+ child.text = _format_line(child)
+ _refresh_render_metadata(child)
+
+
+def _resolve_ignore_patterns(ignore: str | None, root_abs_path: str) -> Optional[PathSpec]:
+ if ignore is None:
+ return None
+
+ content: str
+ if ignore.startswith("file:"):
+ reference = ignore[5:]
+ if reference.startswith("///"):
+ reference_path = reference[2:]
+ elif reference.startswith("//"):
+ reference_path = os.path.join(root_abs_path, reference[2:])
+ elif reference.startswith("/"):
+ reference_path = reference
+ else:
+ reference_path = os.path.join(root_abs_path, reference)
+
+ try:
+ with open(reference_path, "r", encoding="utf-8") as handle:
+ content = handle.read()
+ except FileNotFoundError as exc:
+ raise FileNotFoundError(f"Ignore file not found: {reference_path}") from exc
+ else:
+ content = ignore
+
+ lines = [
+ line.strip()
+ for line in content.splitlines()
+ if line.strip() and not line.strip().startswith("#")
+ ]
+
+ if not lines:
+ return None
+
+ return PathSpec.from_lines("gitwildmatch", lines)
+
+
+def _list_directory_children(
+ directory: str,
+ root_abs_path: str,
+ ignore_spec: Optional[PathSpec],
+ *,
+ max_depth_remaining: int,
+ cache: dict[str, bool],
+) -> tuple[list[os.DirEntry], list[os.DirEntry]]:
+ folders: list[os.DirEntry] = []
+ files: list[os.DirEntry] = []
+
+ try:
+ with os.scandir(directory) as iterator:
+ for entry in iterator:
+ if entry.name in (".", ".."):
+ continue
+ rel_path = os.path.relpath(entry.path, root_abs_path)
+ rel_posix = _normalize_relative_path(rel_path)
+ is_directory = entry.is_dir(follow_symlinks=False)
+
+ if ignore_spec:
+ if is_directory:
+ ignored = ignore_spec.match_file(rel_posix) or ignore_spec.match_file(f"{rel_posix}/")
+ if ignored:
+ if _directory_has_visible_entries(
+ entry.path,
+ root_abs_path,
+ ignore_spec,
+ cache,
+ max_depth_remaining - 1,
+ ):
+ folders.append(entry)
+ continue
+ else:
+ if ignore_spec.match_file(rel_posix):
+ continue
+
+ if is_directory:
+ folders.append(entry)
+ else:
+ files.append(entry)
+ except FileNotFoundError:
+ return ([], [])
+
+ return (folders, files)
+
+
+def _apply_sorting_and_limits(
+ folders: list[_TreeEntry],
+ files: list[_TreeEntry],
+ *,
+ folders_first: bool,
+ sort: tuple[str, str],
+ max_folders: int | None,
+ max_files: int | None,
+ directory_node: _TreeEntry,
+) -> list[_TreeEntry]:
+ sort_key, sort_dir = sort
+ reverse = sort_dir == SORT_DESC
+
+ def key_fn(node: _TreeEntry):
+ if sort_key == SORT_BY_NAME:
+ return node.name.casefold()
+ if sort_key == SORT_BY_CREATED:
+ return node.created
+ return node.modified
+
+ folders_sorted = sorted(folders, key=key_fn, reverse=reverse)
+ files_sorted = sorted(files, key=key_fn, reverse=reverse)
+ combined: list[_TreeEntry] = []
+
+ def append_group(group: list[_TreeEntry], limit: int | None, noun: str) -> None:
+ if limit == 0:
+ limit = None
+ if not group:
+ return
+ if limit is None:
+ combined.extend(group)
+ return
+
+ limit = max(limit, 0)
+ visible = group[:limit]
+ combined.extend(visible)
+
+ overflow = group[limit:]
+ if not overflow:
+ return
+
+ combined.append(
+ _create_summary_comment(
+ directory_node,
+ noun,
+ len(overflow),
+ )
+ )
+
+ if folders_first:
+ append_group(folders_sorted, max_folders, "folder")
+ append_group(files_sorted, max_files, "file")
+ else:
+ append_group(files_sorted, max_files, "file")
+ append_group(folders_sorted, max_folders, "folder")
+
+ return combined
+
+
+def _format_line(node: _TreeEntry) -> str:
+ segments: list[str] = []
+ ancestor = node.parent
+ while ancestor and ancestor.parent is not None:
+ segments.append(" " if ancestor.is_last else "β ")
+ ancestor = ancestor.parent
+ segments.reverse()
+
+ connector = "βββ " if node.is_last else "βββ "
+ if node.item_type == "folder":
+ label = f"{node.name}/"
+ elif node.item_type == "comment":
+ label = f"# {node.name}"
+ else:
+ label = node.name
+
+ return "".join(segments) + connector + label
+
+
+def _build_tree_items_flat(items: Sequence[_TreeEntry]) -> list[dict]:
+ return [
+ {
+ "name": node.name,
+ "level": node.level,
+ "type": node.item_type,
+ "created": node.created,
+ "modified": node.modified,
+ "text": node.text,
+ "items": None,
+ }
+ for node in items
+ ]
+
+
+def _to_nested_structure(items: Sequence[_TreeEntry]) -> list[dict]:
+ def convert(node: _TreeEntry) -> dict:
+ children = None
+ if node.items is not None:
+ children = [convert(child) for child in node.items]
+ return {
+ "name": node.name,
+ "level": node.level,
+ "type": node.item_type,
+ "created": node.created,
+ "modified": node.modified,
+ "text": node.text,
+ "items": children,
+ }
+
+ return [convert(item) for item in items]
+
+
+def _iter_depth_first(items: Sequence[_TreeEntry]) -> Iterable[_TreeEntry]:
+ for node in items:
+ yield node
+ if node.items:
+ yield from _iter_depth_first(node.items)
diff --git a/python/helpers/files.py b/python/helpers/files.py
index c90c701a20..0ed9cb06d6 100644
--- a/python/helpers/files.py
+++ b/python/helpers/files.py
@@ -14,15 +14,19 @@
import importlib.util
import inspect
import glob
+import mimetypes
+from simpleeval import simple_eval
class VariablesPlugin(ABC):
@abstractmethod
- def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]: # type: ignore
+ def get_variables(self, file: str, backup_dirs: list[str] | None = None, **kwargs) -> dict[str, Any]: # type: ignore
pass
-def load_plugin_variables(file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]:
+def load_plugin_variables(
+ file: str, backup_dirs: list[str] | None = None, **kwargs
+) -> dict[str, Any]:
if not file.endswith(".md"):
return {}
@@ -38,11 +42,14 @@ def load_plugin_variables(file: str, backup_dirs: list[str] | None = None) -> di
plugin_file = None
if plugin_file and exists(plugin_file):
-
+
from python.helpers import extract_tools
- classes = extract_tools.load_classes_from_file(plugin_file, VariablesPlugin, one_per_file=False)
+
+ classes = extract_tools.load_classes_from_file(
+ plugin_file, VariablesPlugin, one_per_file=False
+ )
for cls in classes:
- return cls().get_variables(file, backup_dirs) # type: ignore < abstract class here is ok, it is always a subclass
+ return cls().get_variables(file, backup_dirs, **kwargs) # type: ignore < abstract class here is ok, it is always a subclass
# load python code and extract variables variables from it
# module = None
@@ -70,10 +77,13 @@ def load_plugin_variables(file: str, backup_dirs: list[str] | None = None) -> di
# return cls[1]().get_variables() # type: ignore
return {}
+
from python.helpers.strings import sanitize_string
-def parse_file(_filename: str, _directories: list[str] | None = None, _encoding="utf-8", **kwargs):
+def parse_file(
+ _filename: str, _directories: list[str] | None = None, _encoding="utf-8", **kwargs
+):
if _directories is None:
_directories = []
@@ -84,10 +94,10 @@ def parse_file(_filename: str, _directories: list[str] | None = None, _encoding=
with open(absolute_path, "r", encoding=_encoding) as f:
# content = remove_code_fences(f.read())
content = f.read()
-
+
is_json = is_full_json_template(content)
content = remove_code_fences(content)
- variables = load_plugin_variables(absolute_path, _directories) or {} # type: ignore
+ variables = load_plugin_variables(absolute_path, _directories, **kwargs) or {} # type: ignore
variables.update(kwargs)
if is_json:
content = replace_placeholders_json(content, **variables)
@@ -99,12 +109,16 @@ def parse_file(_filename: str, _directories: list[str] | None = None, _encoding=
# Process include statements
content = process_includes(
# here we use kwargs, the plugin variables are not inherited
- content, _directories, **kwargs
+ content,
+ _directories,
+ **kwargs,
)
return content
-def read_prompt_file(_file: str, _directories: list[str] | None = None, _encoding="utf-8", **kwargs):
+def read_prompt_file(
+ _file: str, _directories: list[str] | None = None, _encoding="utf-8", **kwargs
+):
if _directories is None:
_directories = []
@@ -122,22 +136,74 @@ def read_prompt_file(_file: str, _directories: list[str] | None = None, _encodin
# content = remove_code_fences(f.read())
content = f.read()
- variables = load_plugin_variables(_file, _directories) or {} # type: ignore
+ variables = load_plugin_variables(_file, _directories, **kwargs) or {} # type: ignore
variables.update(kwargs)
+ # evaluate conditions
+ content = evaluate_text_conditions(content, **variables)
+
# Replace placeholders with values from kwargs
content = replace_placeholders_text(content, **variables)
# Process include statements
content = process_includes(
# here we use kwargs, the plugin variables are not inherited
- content, _directories, **kwargs
+ content,
+ _directories,
+ **kwargs,
)
return content
-def read_file(relative_path:str, encoding="utf-8"):
+def evaluate_text_conditions(_content: str, **kwargs):
+ # search for {{if ...}} ... {{endif}} blocks and evaluate conditions with nesting support
+ if_pattern = re.compile(r"{{\s*if\s+(.*?)}}", flags=re.DOTALL)
+ token_pattern = re.compile(r"{{\s*(if\b.*?|endif)\s*}}", flags=re.DOTALL)
+
+ def _process(text: str) -> str:
+ m_if = if_pattern.search(text)
+ if not m_if:
+ return text
+
+ depth = 1
+ pos = m_if.end()
+ while True:
+ m = token_pattern.search(text, pos)
+ if not m:
+ # Unterminated if-block, do not modify text
+ return text
+ token = m.group(1)
+ depth += 1 if token.startswith("if ") else -1
+ if depth == 0:
+ break
+ pos = m.end()
+
+ before = text[: m_if.start()]
+ condition = m_if.group(1).strip()
+ inner = text[m_if.end() : m.start()]
+ after = text[m.end() :]
+
+ try:
+ result = simple_eval(condition, names=kwargs)
+ except Exception:
+ # On evaluation error, do not modify this block
+ return text
+
+ if result:
+ # Keep inner content (processed recursively), remove if/endif markers
+ kept = before + _process(inner)
+ else:
+ # Skip entire block, including inner content and markers
+ kept = before
+
+ # Continue processing the remaining text after this block
+ return kept + _process(after)
+
+ return _process(_content)
+
+
+def read_file(relative_path: str, encoding="utf-8"):
# Try to get the absolute path for the file from the original directory or backup directories
absolute_path = get_abs_path(relative_path)
@@ -146,7 +212,7 @@ def read_file(relative_path:str, encoding="utf-8"):
return f.read()
-def read_file_bin(relative_path:str):
+def read_file_bin(relative_path: str):
# Try to get the absolute path for the file from the original directory or backup directories
absolute_path = get_abs_path(relative_path)
@@ -177,8 +243,9 @@ def replace_placeholders_json(_content: str, **kwargs):
# Replace placeholders with values from kwargs
for key, value in kwargs.items():
placeholder = "{{" + key + "}}"
- strval = json.dumps(value)
- _content = _content.replace(placeholder, strval)
+ if placeholder in _content:
+ strval = json.dumps(value)
+ _content = _content.replace(placeholder, strval)
return _content
@@ -248,6 +315,7 @@ def find_file_in_dirs(_filename: str, _directories: list[str]):
f"File '{_filename}' not found in any of the provided directories."
)
+
def get_unique_filenames_in_dirs(dir_paths: list[str], pattern: str = "*"):
# returns absolute paths for unique filenames, priority by order in dir_paths
seen = set()
@@ -263,6 +331,7 @@ def get_unique_filenames_in_dirs(dir_paths: list[str], pattern: str = "*"):
result.sort(key=lambda path: os.path.basename(path))
return result
+
def remove_code_fences(text):
# Pattern to match code fences with optional language specifier
pattern = r"(```|~~~)(.*?\n)(.*?)(\1)"
@@ -335,6 +404,45 @@ def delete_dir(relative_path: str):
pass
+def move_dir(old_path: str, new_path: str):
+ # rename/move the directory from old_path to new_path (both relative)
+ abs_old = get_abs_path(old_path)
+ abs_new = get_abs_path(new_path)
+ if not os.path.isdir(abs_old):
+ return # nothing to rename
+ try:
+ os.rename(abs_old, abs_new)
+ except Exception:
+ pass # suppress all errors, keep behavior consistent
+
+
+# move dir safely, renaming the destination with a number suffix if it already exists
+def move_dir_safe(src, dst, rename_format="{name}_{number}"):
+ base_dst = dst
+ i = 2
+ while exists(dst):
+ dst = rename_format.format(name=base_dst, number=i)
+ i += 1
+ move_dir(src, dst)
+ return dst
+
+
+# create dir safely, add number if needed
+def create_dir_safe(dst, rename_format="{name}_{number}"):
+ base_dst = dst
+ i = 2
+ while exists(dst):
+ dst = rename_format.format(name=base_dst, number=i)
+ i += 1
+ create_dir(dst)
+ return dst
+
+
+def create_dir(relative_path: str):
+ abs_path = get_abs_path(relative_path)
+ os.makedirs(abs_path, exist_ok=True)
+
+
def list_files(relative_path: str, filter: str = "*"):
abs_path = get_abs_path(relative_path)
if not os.path.exists(abs_path):
@@ -351,18 +459,30 @@ def get_abs_path(*relative_paths):
"Convert relative paths to absolute paths based on the base directory."
return os.path.join(get_base_dir(), *relative_paths)
-def deabsolute_path(path:str):
+
+def deabsolute_path(path: str):
"Convert absolute paths to relative paths based on the base directory."
return os.path.relpath(path, get_base_dir())
-def fix_dev_path(path:str):
+
+def fix_dev_path(path: str):
"On dev environment, convert /a0/... paths to local absolute paths"
from python.helpers.runtime import is_development
+
if is_development():
if path.startswith("/a0/"):
path = path.replace("/a0/", "")
return get_abs_path(path)
+
+def normalize_a0_path(path: str):
+ "Convert absolute paths into /a0/... paths"
+ if is_in_base_dir(path):
+ deabs = deabsolute_path(path)
+ return "/a0/" + deabs
+ return path
+
+
def exists(*relative_paths):
path = get_abs_path(*relative_paths)
return os.path.exists(path)
@@ -436,4 +556,45 @@ def move_file(relative_path: str, new_path: str):
def safe_file_name(filename: str) -> str:
# Replace any character that's not alphanumeric, dash, underscore, or dot with underscore
- return re.sub(r'[^a-zA-Z0-9-._]', '_', filename)
+ return re.sub(r"[^a-zA-Z0-9-._]", "_", filename)
+
+
+def read_text_files_in_dir(
+ dir_path: str, max_size: int = 1024 * 1024, pattern: str = "*"
+) -> dict[str, str]:
+
+ abs_path = get_abs_path(dir_path)
+ if not os.path.exists(abs_path):
+ return {}
+ result = {}
+ for file_path in [os.path.join(abs_path, f) for f in os.listdir(abs_path)]:
+ try:
+ if not os.path.isfile(file_path):
+ continue
+ if not fnmatch(os.path.basename(file_path), pattern):
+ continue
+ if max_size > 0 and os.path.getsize(file_path) > max_size:
+ continue
+ mime, _ = mimetypes.guess_type(file_path)
+ if mime is not None and not mime.startswith("text"):
+ continue
+            # MIME guess above already filtered out non-text files; read the rest as text
+ content = read_file(file_path)
+ result[os.path.basename(file_path)] = content
+ except Exception:
+ continue
+ return result
+
+def list_files_in_dir_recursively(relative_path: str) -> list[str]:
+ abs_path = get_abs_path(relative_path)
+ if not os.path.exists(abs_path):
+ return []
+ result = []
+ for root, dirs, files in os.walk(abs_path):
+ for file in files:
+ file_path = os.path.join(root, file)
+ # Return relative path from the base directory
+ rel_path = os.path.relpath(file_path, abs_path)
+ result.append(rel_path)
+ return result
+
\ No newline at end of file
diff --git a/python/helpers/git.py b/python/helpers/git.py
index 0e112f4a71..33e3bec224 100644
--- a/python/helpers/git.py
+++ b/python/helpers/git.py
@@ -47,4 +47,11 @@ def get_git_info():
"version": version
}
- return git_info
\ No newline at end of file
+ return git_info
+
+def get_version():
+ try:
+ git_info = get_git_info()
+ return str(git_info.get("short_tag", "")).strip() or "unknown"
+ except Exception:
+ return "unknown"
\ No newline at end of file
diff --git a/python/helpers/knowledge_import.py b/python/helpers/knowledge_import.py
index 4457650505..a68fe9b825 100644
--- a/python/helpers/knowledge_import.py
+++ b/python/helpers/knowledge_import.py
@@ -36,6 +36,7 @@ def load_knowledge(
index: Dict[str, KnowledgeImport],
metadata: dict[str, Any] = {},
filename_pattern: str = "**/*",
+ recursive: bool = True,
) -> Dict[str, KnowledgeImport]:
"""
Load knowledge files from a directory with change detection and metadata enhancement.
@@ -96,7 +97,7 @@ def load_knowledge(
# Fetch all files in the directory with specified extensions
try:
- kn_files = glob.glob(os.path.join(knowledge_dir, filename_pattern), recursive=True)
+ kn_files = glob.glob(os.path.join(knowledge_dir, filename_pattern), recursive=recursive)
kn_files = [f for f in kn_files if os.path.isfile(f) and not os.path.basename(f).startswith('.')]
except Exception as e:
PrintStyle(font_color="red").print(f"Error scanning knowledge directory {knowledge_dir}: {e}")
diff --git a/python/helpers/log.py b/python/helpers/log.py
index 098862a954..d231810d25 100644
--- a/python/helpers/log.py
+++ b/python/helpers/log.py
@@ -1,6 +1,7 @@
from dataclasses import dataclass, field
import json
-from typing import Any, Literal, Optional, Dict, TypeVar
+import time
+from typing import Any, Literal, Optional, Dict, TypeVar, TYPE_CHECKING
T = TypeVar("T")
import uuid
@@ -8,6 +9,11 @@
from python.helpers.strings import truncate_text_by_ratio
import copy
from typing import TypeVar
+from python.helpers.secrets import get_secrets_manager
+
+
+if TYPE_CHECKING:
+ from agent import AgentContext
T = TypeVar("T")
@@ -31,9 +37,10 @@
HEADING_MAX_LEN: int = 120
-CONTENT_MAX_LEN: int = 10000
+CONTENT_MAX_LEN: int = 15_000
+RESPONSE_CONTENT_MAX_LEN: int = 250_000
KEY_MAX_LEN: int = 60
-VALUE_MAX_LEN: int = 3000
+VALUE_MAX_LEN: int = 5000
PROGRESS_MAX_LEN: int = 120
@@ -88,18 +95,21 @@ def _truncate_value(val: T) -> T:
return truncated
-def _truncate_content(text: str | None) -> str:
+def _truncate_content(text: str | None, type: Type) -> str:
+
+ max_len = CONTENT_MAX_LEN if type != "response" else RESPONSE_CONTENT_MAX_LEN
+
if text is None:
return ""
raw = str(text)
- if len(raw) <= CONTENT_MAX_LEN:
+ if len(raw) <= max_len:
return raw
# Same dynamic replacement logic as value truncation
- removed = len(raw) - CONTENT_MAX_LEN
+ removed = len(raw) - max_len
while True:
replacement = f"\n\n<< {removed} Characters hidden >>\n\n"
- truncated = truncate_text_by_ratio(raw, CONTENT_MAX_LEN, replacement, ratio=0.3)
+ truncated = truncate_text_by_ratio(raw, max_len, replacement, ratio=0.3)
new_removed = len(raw) - (len(truncated) - len(replacement))
if new_removed == removed:
break
@@ -107,31 +117,14 @@ def _truncate_content(text: str | None) -> str:
return truncated
-def _mask_recursive(obj: T) -> T:
- """Recursively mask secrets in nested objects."""
- try:
- from python.helpers.secrets import SecretsManager
-
- secrets_mgr = SecretsManager.get_instance()
- if isinstance(obj, str):
- return secrets_mgr.mask_values(obj)
- elif isinstance(obj, dict):
- return {k: _mask_recursive(v) for k, v in obj.items()} # type: ignore
- elif isinstance(obj, list):
- return [_mask_recursive(item) for item in obj] # type: ignore
- else:
- return obj
- except Exception as _e:
- # If masking fails, return original object
- return obj
@dataclass
class LogItem:
log: "Log"
no: int
- type: str
+ type: Type
heading: str = ""
content: str = ""
temp: bool = False
@@ -139,9 +132,13 @@ class LogItem:
kvps: Optional[OrderedDict] = None # Use OrderedDict for kvps
id: Optional[str] = None # Add id field
guid: str = ""
+ timestamp: float = 0.0
+ duration_ms: Optional[int] = None
+ agent_number: int = 0
def __post_init__(self):
self.guid = self.log.guid
+ self.timestamp = self.timestamp or time.time()
def update(
self,
@@ -189,12 +186,16 @@ def output(self):
"content": self.content,
"temp": self.temp,
"kvps": self.kvps,
+ "timestamp": self.timestamp,
+ "duration_ms": self.duration_ms,
+ "agent_number": self.agent_number,
}
class Log:
def __init__(self):
+ self.context: "AgentContext|None" = None # set from outside
self.guid: str = str(uuid.uuid4())
self.updates: list[int] = []
self.logs: list[LogItem] = []
@@ -208,16 +209,27 @@ def log(
kvps: dict | None = None,
temp: bool | None = None,
update_progress: ProgressUpdate | None = None,
- id: Optional[str] = None, # Add id parameter
+ id: Optional[str] = None,
**kwargs,
) -> LogItem:
# add a minimal item to the log
+ # Determine agent number from streaming agent
+ agent_number = 0
+ if self.context and self.context.streaming_agent:
+ agent_number = self.context.streaming_agent.number
+
item = LogItem(
log=self,
no=len(self.logs),
type=type,
+ agent_number=agent_number,
)
+ # Set duration on previous item and mark it as updated
+ if self.logs:
+ prev = self.logs[-1]
+ prev.duration_ms = int((item.timestamp - prev.timestamp) * 1000)
+ self.updates += [prev.no]
self.logs.append(item)
# and update it (to have just one implementation)
@@ -237,55 +249,56 @@ def log(
def _update_item(
self,
no: int,
- type: str | None = None,
+ type: Type | None = None,
heading: str | None = None,
content: str | None = None,
kvps: dict | None = None,
temp: bool | None = None,
update_progress: ProgressUpdate | None = None,
- id: Optional[str] = None, # Add id parameter
+ id: Optional[str] = None,
**kwargs,
):
item = self.logs[no]
+ if id is not None:
+ item.id = id
+
+ if type is not None:
+ item.type = type
+
+ if temp is not None:
+ item.temp = temp
+
+ if update_progress is not None:
+ item.update_progress = update_progress
+
+
# adjust all content before processing
if heading is not None:
- heading = _mask_recursive(heading)
+ heading = self._mask_recursive(heading)
heading = _truncate_heading(heading)
item.heading = heading
if content is not None:
- content = _mask_recursive(content)
- content = _truncate_content(content)
+ content = self._mask_recursive(content)
+ content = _truncate_content(content, item.type)
item.content = content
if kvps is not None:
kvps = OrderedDict(copy.deepcopy(kvps))
- kvps = _mask_recursive(kvps)
+ kvps = self._mask_recursive(kvps)
kvps = _truncate_value(kvps)
item.kvps = kvps
elif item.kvps is None:
item.kvps = OrderedDict()
if kwargs:
kwargs = copy.deepcopy(kwargs)
- kwargs = _mask_recursive(kwargs)
+ kwargs = self._mask_recursive(kwargs)
item.kvps.update(kwargs)
- if type is not None:
- item.type = type
-
- if update_progress is not None:
- item.update_progress = update_progress
-
- if temp is not None:
- item.temp = temp
-
- if id is not None:
- item.id = id
-
self.updates += [item.no]
self._update_progress_from_item(item)
def set_progress(self, progress: str, no: int = 0, active: bool = True):
- progress = _mask_recursive(progress)
+ progress = self._mask_recursive(progress)
progress = _truncate_progress(progress)
self.progress = progress
if not no:
@@ -324,3 +337,28 @@ def _update_progress_from_item(self, item: LogItem):
item.heading,
(item.no if item.update_progress == "persistent" else -1),
)
+
+ def _mask_recursive(self, obj: T) -> T:
+ """Recursively mask secrets in nested objects."""
+ try:
+ from agent import AgentContext
+ secrets_mgr = get_secrets_manager(self.context or AgentContext.current())
+
+ # debug helper to identify context mismatch
+ # self_id = self.context.id if self.context else None
+ # current_ctx = AgentContext.current()
+ # current_id = current_ctx.id if current_ctx else None
+ # if self_id != current_id:
+ # print(f"Context ID mismatch: {self_id} != {current_id}")
+
+ if isinstance(obj, str):
+ return secrets_mgr.mask_values(obj)
+ elif isinstance(obj, dict):
+ return {k: self._mask_recursive(v) for k, v in obj.items()} # type: ignore
+ elif isinstance(obj, list):
+ return [self._mask_recursive(item) for item in obj] # type: ignore
+ else:
+ return obj
+ except Exception as _e:
+ # If masking fails, return original object
+ return obj
\ No newline at end of file
diff --git a/python/helpers/login.py b/python/helpers/login.py
new file mode 100644
index 0000000000..046f6131db
--- /dev/null
+++ b/python/helpers/login.py
@@ -0,0 +1,15 @@
+from python.helpers import dotenv
+import hashlib
+
+
+def get_credentials_hash():
+ user = dotenv.get_dotenv_value("AUTH_LOGIN")
+ password = dotenv.get_dotenv_value("AUTH_PASSWORD")
+ if not user:
+ return None
+ return hashlib.sha256(f"{user}:{password}".encode()).hexdigest()
+
+
+def is_login_required():
+ user = dotenv.get_dotenv_value("AUTH_LOGIN")
+ return bool(user)
diff --git a/python/helpers/mcp_server.py b/python/helpers/mcp_server.py
index 4c080da69c..3c0308ed9c 100644
--- a/python/helpers/mcp_server.py
+++ b/python/helpers/mcp_server.py
@@ -3,23 +3,26 @@
from urllib.parse import urlparse
from openai import BaseModel
from pydantic import Field
-from fastmcp import FastMCP
+from fastmcp import FastMCP # type: ignore
+import contextvars
from agent import AgentContext, AgentContextType, UserMessage
from python.helpers.persist_chat import remove_chat
from initialize import initialize_agent
from python.helpers.print_style import PrintStyle
-from python.helpers import settings
+from python.helpers import settings, projects
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.types import ASGIApp, Receive, Scope, Send
-from fastmcp.server.http import create_sse_app
+from fastmcp.server.http import create_sse_app # type: ignore
from starlette.requests import Request
import threading
_PRINTER = PrintStyle(italic=True, font_color="green", padding=False)
+# Context variable to store project name from URL (per-request)
+_mcp_project_name: contextvars.ContextVar[str | None] = contextvars.ContextVar('mcp_project_name', default=None)
mcp_server: FastMCP = FastMCP(
name="Agent Zero integrated MCP Server",
@@ -127,6 +130,9 @@ async def send_message(
description="The response from the remote Agent Zero Instance", title="response"
),
]:
+ # Get project name from context variable (set in proxy __call__)
+ project_name = _mcp_project_name.get()
+
context: AgentContext | None = None
if chat_id:
context = AgentContext.get(chat_id)
@@ -137,10 +143,26 @@ async def send_message(
# whether we should save the chat or delete it afterwards
# If we continue a conversation, it must be persistent
persistent_chat = True
+
+ # Validation: if project is in URL but context has different project
+ if project_name:
+ existing_project = context.get_data(projects.CONTEXT_DATA_KEY_PROJECT)
+ if existing_project and existing_project != project_name:
+ return ToolError(
+ error=f"Chat belongs to project '{existing_project}' but URL specifies '{project_name}'",
+ chat_id=chat_id
+ )
else:
config = initialize_agent()
context = AgentContext(config=config, type=AgentContextType.BACKGROUND)
+ # Activate project if specified in URL
+ if project_name:
+ try:
+ projects.activate_project(context.id, project_name)
+ except Exception as e:
+ return ToolError(error=f"Failed to activate project: {str(e)}", chat_id="")
+
if not message:
return ToolError(
error="Message is required", chat_id=context.id if persistent_chat else ""
@@ -325,10 +347,10 @@ def reconfigure(self, token: str):
def _create_custom_http_app(self, streamable_http_path, auth_server_provider, auth_settings, debug, routes):
"""Create a custom HTTP app that manages the session manager manually."""
- from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app
- from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
+ from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app # type: ignore
+ from mcp.server.streamable_http_manager import StreamableHTTPSessionManager # type: ignore
from starlette.routing import Mount
- from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware
+ from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware # type: ignore
import anyio
server_routes = []
@@ -408,12 +430,44 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
# Route based on path
path = scope.get("path", "")
- if f"/t-{self.token}/sse" in path or f"t-{self.token}/messages" in path:
- # Route to SSE app
- await sse_app(scope, receive, send)
- elif f"/t-{self.token}/http" in path:
- # Route to HTTP app
- await http_app(scope, receive, send)
+ # Check for token in path (with or without project segment)
+ # Patterns: /t-{token}/sse, /t-{token}/p-{project}/sse, etc.
+ has_token = f"/t-{self.token}/" in path or f"t-{self.token}/" in path
+
+ # Extract project from path BEFORE cleaning and set in context variable
+ project_name = None
+ if "/p-" in path:
+ try:
+ parts = path.split("/p-")
+ if len(parts) > 1:
+ project_part = parts[1].split("/")[0]
+ if project_part:
+ project_name = project_part
+ _PRINTER.print(f"[MCP] Proxy extracted project from URL: {project_name}")
+ except Exception as e:
+ _PRINTER.print(f"[MCP] Failed to extract project in proxy: {e}")
+
+ # Store project in context variable (will be available in send_message)
+ _mcp_project_name.set(project_name)
+
+ # Strip project segment from path if present (e.g., /p-project_name/)
+ # This is needed because the underlying MCP apps were configured without project paths
+ cleaned_path = path
+ if "/p-" in path:
+ # Remove /p-{project}/ segment: /t-TOKEN/p-PROJECT/sse -> /t-TOKEN/sse
+ import re
+ cleaned_path = re.sub(r'/p-[^/]+/', '/', path)
+
+ # Update scope with cleaned path for the underlying app
+ modified_scope = dict(scope)
+ modified_scope['path'] = cleaned_path
+
+ if has_token and ("/sse" in path or "/messages" in path):
+ # Route to SSE app with cleaned path
+ await sse_app(modified_scope, receive, send)
+ elif has_token and "/http" in path:
+ # Route to HTTP app with cleaned path
+ await http_app(modified_scope, receive, send)
else:
raise StarletteHTTPException(
status_code=403, detail="MCP forbidden"
@@ -421,7 +475,7 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
async def mcp_middleware(request: Request, call_next):
-
+ """Middleware to check if MCP server is enabled."""
# check if MCP server is enabled
cfg = settings.get_settings()
if not cfg["mcp_server_enabled"]:
diff --git a/python/helpers/memory.py b/python/helpers/memory.py
index 9e7fb5eb47..8c8785c5af 100644
--- a/python/helpers/memory.py
+++ b/python/helpers/memory.py
@@ -28,7 +28,7 @@
from python.helpers import knowledge_import
from python.helpers.log import Log, LogItem
from enum import Enum
-from agent import Agent
+from agent import Agent, AgentContext
import models
import logging
from simpleeval import simple_eval
@@ -63,7 +63,7 @@ class Area(Enum):
@staticmethod
async def get(agent: Agent):
- memory_subdir = agent.config.memory_subdir or "default"
+ memory_subdir = get_agent_memory_subdir(agent)
if Memory.index.get(memory_subdir) is None:
log_item = agent.context.log.log(
type="util",
@@ -77,10 +77,11 @@ async def get(agent: Agent):
)
Memory.index[memory_subdir] = db
wrap = Memory(db, memory_subdir=memory_subdir)
- if agent.config.knowledge_subdirs:
- await wrap.preload_knowledge(
- log_item, agent.config.knowledge_subdirs, memory_subdir
- )
+ knowledge_subdirs = get_knowledge_subdirs_by_memory_subdir(
+ memory_subdir, agent.config.knowledge_subdirs or []
+ )
+ if knowledge_subdirs:
+ await wrap.preload_knowledge(log_item, knowledge_subdirs, memory_subdir)
return wrap
else:
return Memory(
@@ -106,16 +107,20 @@ async def get_by_subdir(
in_memory=False,
)
wrap = Memory(db, memory_subdir=memory_subdir)
- if preload_knowledge and agent_config.knowledge_subdirs:
- await wrap.preload_knowledge(
- log_item, agent_config.knowledge_subdirs, memory_subdir
+ if preload_knowledge:
+ knowledge_subdirs = get_knowledge_subdirs_by_memory_subdir(
+ memory_subdir, agent_config.knowledge_subdirs or []
)
+ if knowledge_subdirs:
+ await wrap.preload_knowledge(
+ log_item, knowledge_subdirs, memory_subdir
+ )
Memory.index[memory_subdir] = db
return Memory(db=Memory.index[memory_subdir], memory_subdir=memory_subdir)
@staticmethod
async def reload(agent: Agent):
- memory_subdir = agent.config.memory_subdir or "default"
+ memory_subdir = get_agent_memory_subdir(agent)
if Memory.index.get(memory_subdir):
del Memory.index[memory_subdir]
return await Memory.get(agent)
@@ -136,7 +141,7 @@ def initialize(
em_dir = files.get_abs_path(
"memory/embeddings"
) # just caching, no need to parameterize
- db_dir = Memory._abs_db_dir(memory_subdir)
+ db_dir = abs_db_dir(memory_subdir)
# make sure embeddings and database directories exist
os.makedirs(db_dir, exist_ok=True)
@@ -249,7 +254,7 @@ async def preload_knowledge(
log_item.update(heading="Preloading knowledge...")
# db abs path
- db_dir = Memory._abs_db_dir(memory_subdir)
+ db_dir = abs_db_dir(memory_subdir)
# Load the index file if it exists
index_path = files.get_abs_path(db_dir, "knowledge_import.json")
@@ -298,12 +303,24 @@ def _preload_knowledge_folders(
):
# load knowledge folders, subfolders by area
for kn_dir in kn_dirs:
+ # files placed in the root of the knowledge dir are imported into the MAIN area
+ index = knowledge_import.load_knowledge(
+ log_item,
+ abs_knowledge_dir(kn_dir),
+ index,
+ {"area": Memory.Area.MAIN},
+ filename_pattern="*",
+ recursive=False,
+ )
+ # subdirectories go to their folders
for area in Memory.Area:
index = knowledge_import.load_knowledge(
log_item,
- files.get_abs_path("knowledge", kn_dir, area.value),
+ # files.get_abs_path("knowledge", kn_dir, area.value),
+ abs_knowledge_dir(kn_dir, area.value),
index,
{"area": area.value},
+ recursive=True,
)
# load instruments descriptions
@@ -313,6 +330,7 @@ def _preload_knowledge_folders(
index,
{"area": Memory.Area.INSTRUMENTS.value},
filename_pattern="**/*.md",
+ recursive=True,
)
return index
@@ -418,7 +436,7 @@ def _generate_doc_id(self):
@staticmethod
def _save_db_file(db: MyFaiss, memory_subdir: str):
- abs_dir = Memory._abs_db_dir(memory_subdir)
+ abs_dir = abs_db_dir(memory_subdir)
db.save_local(folder_path=abs_dir)
@staticmethod
@@ -446,10 +464,6 @@ def _cosine_normalizer(val: float) -> float:
) # float precision can cause values like 1.0000000596046448
return res
- @staticmethod
- def _abs_db_dir(memory_subdir: str) -> str:
- return files.get_abs_path("memory", memory_subdir)
-
@staticmethod
def format_docs_plain(docs: list[Document]) -> list[str]:
result = []
@@ -466,10 +480,6 @@ def get_timestamp():
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-def get_memory_subdir_abs(agent: Agent) -> str:
- return files.get_abs_path("memory", agent.config.memory_subdir or "default")
-
-
def get_custom_knowledge_subdir_abs(agent: Agent) -> str:
for dir in agent.config.knowledge_subdirs:
if dir != "default":
@@ -480,3 +490,86 @@ def get_custom_knowledge_subdir_abs(agent: Agent) -> str:
def reload():
# clear the memory index, this will force all DBs to reload
Memory.index = {}
+
+
+def abs_db_dir(memory_subdir: str) -> str:
+ # patch for projects; this avoids re-working the structure of memory subdirs
+ if memory_subdir.startswith("projects/"):
+ from python.helpers.projects import get_project_meta_folder
+
+ return files.get_abs_path(get_project_meta_folder(memory_subdir[9:]), "memory")
+ # standard subdirs
+ return files.get_abs_path("memory", memory_subdir)
+
+
+def abs_knowledge_dir(knowledge_subdir: str, *sub_dirs: str) -> str:
+ # patch for projects; this avoids re-working the structure of knowledge subdirs
+ if knowledge_subdir.startswith("projects/"):
+ from python.helpers.projects import get_project_meta_folder
+
+ return files.get_abs_path(
+ get_project_meta_folder(knowledge_subdir[9:]), "knowledge", *sub_dirs
+ )
+ # standard subdirs
+ return files.get_abs_path("knowledge", knowledge_subdir, *sub_dirs)
+
+
+def get_memory_subdir_abs(agent: Agent) -> str:
+ subdir = get_agent_memory_subdir(agent)
+ return abs_db_dir(subdir)
+
+
+def get_agent_memory_subdir(agent: Agent) -> str:
+ # if project is active, use project memory subdir
+ return get_context_memory_subdir(agent.context)
+
+
+def get_context_memory_subdir(context: AgentContext) -> str:
+ # if project is active, use project memory subdir
+ from python.helpers.projects import (
+ get_context_memory_subdir as get_project_memory_subdir,
+ )
+
+ memory_subdir = get_project_memory_subdir(context)
+ if memory_subdir:
+ return memory_subdir
+
+ # no project, regular memory subdir
+ return context.config.memory_subdir or "default"
+
+
+def get_existing_memory_subdirs() -> list[str]:
+ try:
+ from python.helpers.projects import (
+ get_project_meta_folder,
+ get_projects_parent_folder,
+ )
+
+ # Get subdirectories from memory folder
+ subdirs = files.get_subdirectories("memory", exclude="embeddings")
+
+ project_subdirs = files.get_subdirectories(get_projects_parent_folder())
+ for project_subdir in project_subdirs:
+ if files.exists(
+ get_project_meta_folder(project_subdir), "memory", "index.faiss"
+ ):
+ subdirs.append(f"projects/{project_subdir}")
+
+ # Ensure 'default' is always available
+ if "default" not in subdirs:
+ subdirs.insert(0, "default")
+
+ return subdirs
+ except Exception as e:
+ PrintStyle.error(f"Failed to get memory subdirectories: {str(e)}")
+ return ["default"]
+
+
+def get_knowledge_subdirs_by_memory_subdir(
+ memory_subdir: str, default: list[str]
+) -> list[str]:
+ if memory_subdir.startswith("projects/"):
+ from python.helpers.projects import get_project_meta_folder
+
+ default.append(get_project_meta_folder(memory_subdir[9:], "knowledge"))
+ return default
diff --git a/python/helpers/model_discovery.py b/python/helpers/model_discovery.py
new file mode 100644
index 0000000000..478d2de9f9
--- /dev/null
+++ b/python/helpers/model_discovery.py
@@ -0,0 +1,439 @@
+"""
+Dynamic Model Discovery Service for Agent Zero
+
+Fetches available models DIRECTLY from each provider's API based on
+which API keys the user has configured. No hardcoded model lists.
+
+Supported Providers:
+- OpenAI: GET https://api.openai.com/v1/models
+- Anthropic: GET https://api.anthropic.com/v1/models
+- Google Gemini: GET https://generativelanguage.googleapis.com/v1beta/models
+- Groq: GET https://api.groq.com/openai/v1/models
+- Mistral: GET https://api.mistral.ai/v1/models
+- DeepSeek: GET https://api.deepseek.com/models
+- xAI: GET https://api.x.ai/v1/models
+- OpenRouter: GET https://openrouter.ai/api/v1/models
+- SambaNova: GET https://api.sambanova.ai/v1/models
+- And any OpenAI-compatible provider with api_base set
+"""
+
+import json
+import os
+import time
+from typing import Any
+
+from python.helpers import files
+from python.helpers.print_style import PrintStyle
+from python.helpers.providers import FieldOption
+
+# Cache configuration
+CACHE_FILE = "tmp/model_cache.json"
+CACHE_TTL_SECONDS = 1 * 60 * 60 # 1 hour — deliberately short TTL so provider model lists stay fresh
+
+# Provider API endpoints
+PROVIDER_ENDPOINTS = {
+ "openai": {
+ "url": "https://api.openai.com/v1/models",
+ "auth_type": "bearer",
+ },
+ "anthropic": {
+ "url": "https://api.anthropic.com/v1/models",
+ "auth_type": "anthropic",
+ },
+ "google": {
+ "url": "https://generativelanguage.googleapis.com/v1beta/models",
+ "auth_type": "query_key",
+ },
+ "groq": {
+ "url": "https://api.groq.com/openai/v1/models",
+ "auth_type": "bearer",
+ },
+ "mistral": {
+ "url": "https://api.mistral.ai/v1/models",
+ "auth_type": "bearer",
+ },
+ "deepseek": {
+ "url": "https://api.deepseek.com/models",
+ "auth_type": "bearer",
+ },
+ "xai": {
+ "url": "https://api.x.ai/v1/models",
+ "auth_type": "bearer",
+ },
+ "openrouter": {
+ "url": "https://openrouter.ai/api/v1/models",
+ "auth_type": "bearer",
+ "extra_headers": {
+ "HTTP-Referer": "https://agent-zero.ai/",
+ "X-Title": "Agent Zero",
+ },
+ },
+ "sambanova": {
+ "url": "https://api.sambanova.ai/v1/models",
+ "auth_type": "bearer",
+ },
+}
+
+# Providers that are OpenAI-compatible and can use custom api_base
+OPENAI_COMPATIBLE_PROVIDERS = {
+ "lm_studio",
+ "ollama",
+ "venice",
+ "a0_venice",
+ "azure",
+ "other",
+ "zai",
+ "zai_coding",
+}
+
+
+def _load_cache() -> dict[str, Any] | None:
+ """Load cached model data if valid."""
+ cache_path = files.get_abs_path(CACHE_FILE)
+ if not os.path.exists(cache_path):
+ return None
+
+ try:
+ with open(cache_path, "r", encoding="utf-8") as f:
+ cache = json.load(f)
+
+ # Check TTL
+ cached_at = cache.get("cached_at", 0)
+ if (time.time() - cached_at) > CACHE_TTL_SECONDS:
+ return None
+
+ return cache
+ except (json.JSONDecodeError, IOError) as e:
+ PrintStyle.warning(f"Failed to load model cache: {e}")
+ return None
+
+
+def _save_cache(data: dict[str, Any]):
+ """Save model data to cache."""
+ cache_path = files.get_abs_path(CACHE_FILE)
+ try:
+ os.makedirs(os.path.dirname(cache_path), exist_ok=True)
+ data["cached_at"] = time.time()
+ with open(cache_path, "w", encoding="utf-8") as f:
+ json.dump(data, f, indent=2)
+ except IOError as e:
+ PrintStyle.warning(f"Failed to save model cache: {e}")
+
+
+def _get_cached_models(provider: str, model_type: str) -> list[dict[str, str]] | None:
+ """Get cached models for a provider if available."""
+ cache = _load_cache()
+ if cache:
+ key = f"{provider}_{model_type}"
+ return cache.get("providers", {}).get(key)
+ return None
+
+
+def _cache_models(provider: str, model_type: str, models: list[dict[str, str]]):
+ """Cache models for a provider."""
+ cache = _load_cache() or {"providers": {}}
+ if "providers" not in cache:
+ cache["providers"] = {}
+ key = f"{provider}_{model_type}"
+ cache["providers"][key] = models
+ _save_cache(cache)
+
+
+def _filter_models_by_type(
+ models: list[dict[str, str]], model_type: str, provider: str
+) -> list[dict[str, str]]:
+ """Filter models based on type (chat vs embedding)."""
+ if model_type == "embedding":
+ # Look for embedding models
+ embedding_keywords = ["embed", "embedding", "text-embedding"]
+ return [
+ m for m in models
+ if any(kw in m["id"].lower() for kw in embedding_keywords)
+ ]
+ else:
+ # For chat, exclude embedding, whisper, tts, dall-e, moderation models
+ exclude_keywords = [
+ "embed", "whisper", "tts", "dall-e", "davinci", "babbage",
+ "moderation", "curie", "ada-", "text-ada", "text-babbage",
+ "text-curie", "text-davinci", "code-", "audio"
+ ]
+ # For OpenRouter, include all since they're all chat models
+ if provider == "openrouter":
+ return models
+ return [
+ m for m in models
+ if not any(kw in m["id"].lower() for kw in exclude_keywords)
+ ]
+
+
+async def _fetch_models_openai_compatible(
+ api_key: str,
+ base_url: str,
+ extra_headers: dict[str, str] | None = None,
+) -> list[dict[str, str]]:
+ """Fetch models from any OpenAI-compatible API."""
+ import httpx
+
+ headers = {
+ "Authorization": f"Bearer {api_key}",
+ "Content-Type": "application/json",
+ }
+ if extra_headers:
+ headers.update(extra_headers)
+
+ url = f"{base_url.rstrip('/')}/models"
+
+ try:
+ async with httpx.AsyncClient(timeout=15.0) as client:
+ response = await client.get(url, headers=headers)
+
+ if response.status_code != 200:
+ PrintStyle.warning(f"API returned status {response.status_code} from {url}")
+ return []
+
+ data = response.json()
+ models_data = data.get("data", [])
+
+ models = []
+ for m in models_data:
+ model_id = m.get("id", "")
+ if model_id:
+ # Use id as name, or use name field if available
+ name = m.get("name") or model_id
+ models.append({"id": model_id, "name": name})
+
+ return models
+
+ except httpx.HTTPError as e:
+ PrintStyle.warning(f"Failed to fetch models from {url}: {e}")
+ return []
+ except Exception as e:
+ PrintStyle.error(f"Unexpected error fetching models from {url}: {e}")
+ return []
+
+
+async def _fetch_models_anthropic(api_key: str) -> list[dict[str, str]]:
+ """Fetch models from Anthropic API."""
+ import httpx
+
+ headers = {
+ "x-api-key": api_key,
+ "anthropic-version": "2023-06-01",
+ "Content-Type": "application/json",
+ }
+
+ try:
+ async with httpx.AsyncClient(timeout=15.0) as client:
+ response = await client.get(
+ "https://api.anthropic.com/v1/models",
+ headers=headers,
+ )
+
+ if response.status_code != 200:
+ PrintStyle.warning(f"Anthropic API returned status {response.status_code}")
+ return []
+
+ data = response.json()
+ models_data = data.get("data", [])
+
+ models = []
+ for m in models_data:
+ model_id = m.get("id", "")
+ if model_id:
+ display_name = m.get("display_name") or model_id
+ models.append({"id": model_id, "name": display_name})
+
+ return models
+
+ except httpx.HTTPError as e:
+ PrintStyle.warning(f"Failed to fetch Anthropic models: {e}")
+ return []
+ except Exception as e:
+ PrintStyle.error(f"Unexpected error fetching Anthropic models: {e}")
+ return []
+
+
+async def _fetch_models_google(api_key: str) -> list[dict[str, str]]:
+ """Fetch models from Google Gemini API."""
+ import httpx
+
+ try:
+ async with httpx.AsyncClient(timeout=15.0) as client:
+ response = await client.get(
+ f"https://generativelanguage.googleapis.com/v1beta/models?key={api_key}",
+ )
+
+ if response.status_code != 200:
+ PrintStyle.warning(f"Google API returned status {response.status_code}")
+ return []
+
+ data = response.json()
+ models_data = data.get("models", [])
+
+ models = []
+ for m in models_data:
+ # Google returns names like "models/gemini-pro"
+ full_name = m.get("name", "")
+ model_id = full_name.replace("models/", "") if full_name.startswith("models/") else full_name
+ if model_id:
+ display_name = m.get("displayName") or model_id
+ models.append({"id": model_id, "name": display_name})
+
+ return models
+
+ except httpx.HTTPError as e:
+ PrintStyle.warning(f"Failed to fetch Google models: {e}")
+ return []
+ except Exception as e:
+ PrintStyle.error(f"Unexpected error fetching Google models: {e}")
+ return []
+
+
+async def _fetch_models_for_provider(
+ provider: str,
+ api_key: str,
+ api_base: str | None = None,
+) -> list[dict[str, str]]:
+ """Fetch models from a specific provider."""
+ if not api_key or api_key == "None" or api_key == "":
+ return []
+
+ # Handle Anthropic separately (different auth)
+ if provider == "anthropic":
+ return await _fetch_models_anthropic(api_key)
+
+ # Handle Google separately (query param auth)
+ if provider == "google":
+ return await _fetch_models_google(api_key)
+
+ # Handle known providers with predefined endpoints
+ if provider in PROVIDER_ENDPOINTS:
+ endpoint_config = PROVIDER_ENDPOINTS[provider]
+ return await _fetch_models_openai_compatible(
+ api_key=api_key,
+ base_url=endpoint_config["url"].rsplit("/models", 1)[0],
+ extra_headers=endpoint_config.get("extra_headers"),
+ )
+
+ # Handle OpenAI-compatible providers with custom api_base
+ if provider in OPENAI_COMPATIBLE_PROVIDERS and api_base:
+ return await _fetch_models_openai_compatible(
+ api_key=api_key,
+ base_url=api_base,
+ )
+
+ return []
+
+
+async def get_models_for_provider(
+ model_type: str,
+ provider: str,
+ api_keys: dict[str, str] | None = None,
+ api_base: str | None = None,
+ force_refresh: bool = False,
+) -> list[FieldOption]:
+ """
+ Get available models for a provider by fetching from their API.
+
+ Args:
+ model_type: Either 'chat' or 'embedding'
+ provider: Provider ID (e.g., 'openai', 'anthropic', 'openrouter')
+ api_keys: Dictionary of API keys keyed by provider name
+ api_base: Optional custom API base URL for OpenAI-compatible providers
+ force_refresh: If True, bypass cache
+
+ Returns:
+ List of FieldOption dicts with 'value' and 'label' keys
+ """
+ if api_keys is None:
+ api_keys = {}
+
+ # Get API key for this provider
+ api_key = api_keys.get(provider, "")
+
+ # Check cache first (unless force refresh)
+ if not force_refresh:
+ cached = _get_cached_models(provider, model_type)
+ if cached:
+ return _convert_to_options(cached)
+
+ # Fetch from provider API
+ models = await _fetch_models_for_provider(provider, api_key, api_base)
+
+ if models:
+ # Filter by model type
+ models = _filter_models_by_type(models, model_type, provider)
+
+ # Sort by name
+ models.sort(key=lambda x: x["name"].lower())
+
+ # Cache the results
+ _cache_models(provider, model_type, models)
+
+ return _convert_to_options(models)
+
+
+def _convert_to_options(models: list[dict[str, str]]) -> list[FieldOption]:
+ """Convert model list to FieldOption format."""
+ options: list[FieldOption] = []
+
+ for m in models:
+ options.append({
+ "value": m["id"],
+ "label": m["name"],
+ })
+
+ # Always add custom option at the end
+ options.append({
+ "value": "__custom__",
+ "label": "Custom (enter manually)",
+ })
+
+ return options
+
+
+def get_models_for_provider_sync(
+ model_type: str,
+ provider: str,
+ api_keys: dict[str, str] | None = None,
+) -> list[FieldOption]:
+ """
+ Synchronous version - returns cached models or empty list with custom option.
+ Used for initial settings load; async refresh happens on provider change.
+ """
+ if api_keys is None:
+ api_keys = {}
+
+ # Check cache
+ cached = _get_cached_models(provider, model_type)
+ if cached:
+ return _convert_to_options(cached)
+
+ # No cache available - return just the custom option
+ # The frontend will trigger an async refresh when the modal opens
+ return [{
+ "value": "__custom__",
+ "label": "Custom (enter manually)",
+ }]
+
+
+def clear_cache():
+ """Clear the model cache to force refresh on next request."""
+ cache_path = files.get_abs_path(CACHE_FILE)
+ if os.path.exists(cache_path):
+ try:
+ os.remove(cache_path)
+ PrintStyle.info("Model cache cleared")
+ except IOError as e:
+ PrintStyle.warning(f"Failed to clear model cache: {e}")
+
+
+def clear_provider_cache(provider: str, model_type: str = "chat"):
+ """Clear cache for a specific provider."""
+ cache = _load_cache()
+ if cache and "providers" in cache:
+ key = f"{provider}_{model_type}"
+ if key in cache["providers"]:
+ del cache["providers"][key]
+ _save_cache(cache)
+ PrintStyle.info(f"Cleared cache for {provider}/{model_type}")
diff --git a/python/helpers/persist_chat.py b/python/helpers/persist_chat.py
index 290bf9c876..9e4c20b6c7 100644
--- a/python/helpers/persist_chat.py
+++ b/python/helpers/persist_chat.py
@@ -123,6 +123,10 @@ def _serialize_context(context: AgentContext):
agents.append(_serialize_agent(agent))
agent = agent.data.get(Agent.DATA_NAME_SUBORDINATE, None)
+
+ data = {k: v for k, v in context.data.items() if not k.startswith("_")}
+ output_data = {k: v for k, v in context.output_data.items() if not k.startswith("_")}
+
return {
"id": context.id,
"name": context.name,
@@ -142,6 +146,8 @@ def _serialize_context(context: AgentContext):
context.streaming_agent.number if context.streaming_agent else 0
),
"log": _serialize_log(context.log),
+ "data": data,
+ "output_data": output_data,
}
@@ -190,6 +196,8 @@ def _deserialize_context(data):
),
log=log,
paused=False,
+ data=data.get("data", {}),
+ output_data=data.get("output_data", {}),
# agent0=agent0,
# streaming_agent=straming_agent,
)
@@ -254,17 +262,19 @@ def _deserialize_log(data: dict[str, Any]) -> "Log":
# Deserialize the list of LogItem objects
i = 0
for item_data in data.get("logs", []):
- log.logs.append(
- LogItem(
- log=log, # restore the log reference
- no=i, # item_data["no"],
- type=item_data["type"],
- heading=item_data.get("heading", ""),
- content=item_data.get("content", ""),
- kvps=OrderedDict(item_data["kvps"]) if item_data["kvps"] else None,
- temp=item_data.get("temp", False),
- )
- )
+ log.logs.append(LogItem(
+ log=log, # restore the log reference
+ no=i, # item_data["no"],
+ type=item_data["type"],
+ heading=item_data.get("heading", ""),
+ content=item_data.get("content", ""),
+ kvps=OrderedDict(item_data["kvps"]) if item_data["kvps"] else None,
+ temp=item_data.get("temp", False),
+ # Pass metrics directly to constructor
+ timestamp=item_data.get("timestamp", 0.0),
+ duration_ms=item_data.get("duration_ms"),
+ agent_number=item_data.get("agent_number", 0),
+ ))
log.updates.append(i)
i += 1
diff --git a/python/helpers/playwright.py b/python/helpers/playwright.py
index 34f851ab63..c352407b2b 100644
--- a/python/helpers/playwright.py
+++ b/python/helpers/playwright.py
@@ -1,4 +1,6 @@
+import os
+import sys
from pathlib import Path
import subprocess
from python.helpers import files
@@ -9,8 +11,14 @@
def get_playwright_binary():
pw_cache = Path(get_playwright_cache_dir())
- headless_shell = next(pw_cache.glob("chromium_headless_shell-*/chrome-*/headless_shell"), None)
- return headless_shell
+ for pattern in (
+ "chromium_headless_shell-*/chrome-*/headless_shell",
+ "chromium_headless_shell-*/chrome-*/headless_shell.exe",
+ ):
+ binary = next(pw_cache.glob(pattern), None)
+ if binary:
+ return binary
+ return None
def get_playwright_cache_dir():
return files.get_abs_path("tmp/playwright")
@@ -19,7 +27,6 @@ def ensure_playwright_binary():
bin = get_playwright_binary()
if not bin:
cache = get_playwright_cache_dir()
- import os
env = os.environ.copy()
env["PLAYWRIGHT_BROWSERS_PATH"] = cache
subprocess.check_call(
diff --git a/python/helpers/print_style.py b/python/helpers/print_style.py
index 6d6e5f4f58..188697c866 100644
--- a/python/helpers/print_style.py
+++ b/python/helpers/print_style.py
@@ -95,9 +95,10 @@ def get(self, *args, sep=' ', **kwargs):
# Automatically mask secrets in all print output
try:
- from python.helpers.secrets import SecretsManager
- secrets_mgr = SecretsManager.get_instance()
- text = secrets_mgr.mask_values(text)
+ if not hasattr(self, "secrets_mgr"):
+ from python.helpers.secrets import get_secrets_manager
+ self.secrets_mgr = get_secrets_manager()
+ text = self.secrets_mgr.mask_values(text)
except Exception:
# If masking fails, proceed without masking to avoid breaking functionality
pass
diff --git a/python/helpers/projects.py b/python/helpers/projects.py
new file mode 100644
index 0000000000..6e25738c6e
--- /dev/null
+++ b/python/helpers/projects.py
@@ -0,0 +1,429 @@
+import os
+from typing import Literal, TypedDict, TYPE_CHECKING
+
+from python.helpers import files, dirty_json, persist_chat, file_tree
+from python.helpers.print_style import PrintStyle
+
+
+if TYPE_CHECKING:
+ from agent import AgentContext
+
+PROJECTS_PARENT_DIR = "usr/projects"
+PROJECT_META_DIR = ".a0proj"
+PROJECT_INSTRUCTIONS_DIR = "instructions"
+PROJECT_KNOWLEDGE_DIR = "knowledge"
+PROJECT_HEADER_FILE = "project.json"
+
+CONTEXT_DATA_KEY_PROJECT = "project"
+
+
+class FileStructureInjectionSettings(TypedDict):
+ enabled: bool
+ max_depth: int
+ max_files: int
+ max_folders: int
+ max_lines: int
+ gitignore: str
+
+class SubAgentSettings(TypedDict):
+ enabled: bool
+
+class BasicProjectData(TypedDict):
+ title: str
+ description: str
+ instructions: str
+ color: str
+ memory: Literal[
+ "own", "global"
+    ] # in the future we can add custom and point to another existing folder
+ file_structure: FileStructureInjectionSettings
+
+class EditProjectData(BasicProjectData):
+ name: str
+ instruction_files_count: int
+ knowledge_files_count: int
+ variables: str
+ secrets: str
+ subagents: dict[str, SubAgentSettings]
+
+
+
+def get_projects_parent_folder():
+ return files.get_abs_path(PROJECTS_PARENT_DIR)
+
+
+def get_project_folder(name: str):
+ return files.get_abs_path(get_projects_parent_folder(), name)
+
+
+def get_project_meta_folder(name: str, *sub_dirs: str):
+ return files.get_abs_path(get_project_folder(name), PROJECT_META_DIR, *sub_dirs)
+
+
+def delete_project(name: str):
+ abs_path = files.get_abs_path(PROJECTS_PARENT_DIR, name)
+ files.delete_dir(abs_path)
+ deactivate_project_in_chats(name)
+ return name
+
+
+def create_project(name: str, data: BasicProjectData):
+ abs_path = files.create_dir_safe(
+ files.get_abs_path(PROJECTS_PARENT_DIR, name), rename_format="{name}_{number}"
+ )
+ create_project_meta_folders(name)
+ data = _normalizeBasicData(data)
+ save_project_header(name, data)
+ return name
+
+
+def load_project_header(name: str):
+ abs_path = files.get_abs_path(
+ PROJECTS_PARENT_DIR, name, PROJECT_META_DIR, PROJECT_HEADER_FILE
+ )
+ header: dict = dirty_json.parse(files.read_file(abs_path)) # type: ignore
+ header["name"] = name
+ return header
+
+
+def _default_file_structure_settings():
+ try:
+ gitignore = files.read_file("conf/projects.default.gitignore")
+ except Exception:
+ gitignore = ""
+ return FileStructureInjectionSettings(
+ enabled=True,
+ max_depth=5,
+ max_files=20,
+ max_folders=20,
+ max_lines=250,
+ gitignore=gitignore,
+ )
+
+
+def _normalizeBasicData(data: BasicProjectData):
+ return BasicProjectData(
+ title=data.get("title", ""),
+ description=data.get("description", ""),
+ instructions=data.get("instructions", ""),
+ color=data.get("color", ""),
+ memory=data.get("memory", "own"),
+ file_structure=data.get(
+ "file_structure",
+ _default_file_structure_settings(),
+ ),
+ )
+
+
+def _normalizeEditData(data: EditProjectData):
+ return EditProjectData(
+ name=data.get("name", ""),
+ title=data.get("title", ""),
+ description=data.get("description", ""),
+ instructions=data.get("instructions", ""),
+ variables=data.get("variables", ""),
+ color=data.get("color", ""),
+ instruction_files_count=data.get("instruction_files_count", 0),
+ knowledge_files_count=data.get("knowledge_files_count", 0),
+ secrets=data.get("secrets", ""),
+ memory=data.get("memory", "own"),
+ file_structure=data.get(
+ "file_structure",
+ _default_file_structure_settings(),
+ ),
+ subagents=data.get("subagents", {}),
+ )
+
+
+def _edit_data_to_basic_data(data: EditProjectData):
+ return _normalizeBasicData(data)
+
+
+def _basic_data_to_edit_data(data: BasicProjectData):
+ return _normalizeEditData(data) # type: ignore
+
+
+def update_project(name: str, data: EditProjectData):
+ # merge with current state
+ current = load_edit_project_data(name)
+ current.update(data)
+ current = _normalizeEditData(current)
+
+ # save header data
+ header = _edit_data_to_basic_data(current)
+ save_project_header(name, header)
+
+ # save secrets
+ save_project_variables(name, current["variables"])
+ save_project_secrets(name, current["secrets"])
+ save_project_subagents(name, current["subagents"])
+
+ reactivate_project_in_chats(name)
+ return name
+
+
+def load_basic_project_data(name: str) -> BasicProjectData:
+ data = BasicProjectData(**load_project_header(name))
+ normalized = _normalizeBasicData(data)
+ return normalized
+
+
+def load_edit_project_data(name: str) -> EditProjectData:
+ data = load_basic_project_data(name)
+ additional_instructions = get_additional_instructions_files(
+ name
+ ) # for additional info
+ variables = load_project_variables(name)
+ secrets = load_project_secrets_masked(name)
+ subagents = load_project_subagents(name)
+ knowledge_files_count = get_knowledge_files_count(name)
+ data = EditProjectData(
+ **data,
+ name=name,
+ instruction_files_count=len(additional_instructions),
+ knowledge_files_count=knowledge_files_count,
+ variables=variables,
+ secrets=secrets,
+ subagents=subagents,
+ )
+ data = _normalizeEditData(data)
+ return data
+
+
+def save_project_header(name: str, data: BasicProjectData):
+ # save project header file
+ header = dirty_json.stringify(data)
+ abs_path = files.get_abs_path(
+ PROJECTS_PARENT_DIR, name, PROJECT_META_DIR, PROJECT_HEADER_FILE
+ )
+
+ files.write_file(abs_path, header)
+
+
+def get_active_projects_list():
+ return _get_projects_list(get_projects_parent_folder())
+
+
+def _get_projects_list(parent_dir):
+ projects = []
+
+ # folders in project directory
+ for name in os.listdir(parent_dir):
+ try:
+ abs_path = os.path.join(parent_dir, name)
+ if os.path.isdir(abs_path):
+ project_data = load_basic_project_data(name)
+ projects.append(
+ {
+ "name": name,
+ "title": project_data.get("title", ""),
+ "description": project_data.get("description", ""),
+ "color": project_data.get("color", ""),
+ }
+ )
+ except Exception as e:
+ PrintStyle.error(f"Error loading project {name}: {str(e)}")
+
+ # sort projects by name
+ projects.sort(key=lambda x: x["name"])
+ return projects
+
+
+def activate_project(context_id: str, name: str):
+ from agent import AgentContext
+
+ data = load_edit_project_data(name)
+ context = AgentContext.get(context_id)
+ if context is None:
+ raise Exception("Context not found")
+ display_name = str(data.get("title", name))
+ display_name = display_name[:22] + "..." if len(display_name) > 25 else display_name
+ context.set_data(CONTEXT_DATA_KEY_PROJECT, name)
+ context.set_output_data(
+ CONTEXT_DATA_KEY_PROJECT,
+ {"name": name, "title": display_name, "color": data.get("color", "")},
+ )
+
+ # persist
+ persist_chat.save_tmp_chat(context)
+
+
+def deactivate_project(context_id: str):
+ from agent import AgentContext
+
+ context = AgentContext.get(context_id)
+ if context is None:
+ raise Exception("Context not found")
+ context.set_data(CONTEXT_DATA_KEY_PROJECT, None)
+ context.set_output_data(CONTEXT_DATA_KEY_PROJECT, None)
+
+ # persist
+ persist_chat.save_tmp_chat(context)
+
+
+def reactivate_project_in_chats(name: str):
+ from agent import AgentContext
+
+ for context in AgentContext.all():
+ if context.get_data(CONTEXT_DATA_KEY_PROJECT) == name:
+ activate_project(context.id, name)
+ persist_chat.save_tmp_chat(context)
+
+
+def deactivate_project_in_chats(name: str):
+ from agent import AgentContext
+
+ for context in AgentContext.all():
+ if context.get_data(CONTEXT_DATA_KEY_PROJECT) == name:
+ deactivate_project(context.id)
+ persist_chat.save_tmp_chat(context)
+
+
+def build_system_prompt_vars(name: str):
+ project_data = load_basic_project_data(name)
+ main_instructions = project_data.get("instructions", "") or ""
+ additional_instructions = get_additional_instructions_files(name)
+ complete_instructions = (
+ main_instructions
+ + "\n\n".join(
+ additional_instructions[k] for k in sorted(additional_instructions)
+ )
+ ).strip()
+ return {
+ "project_name": project_data.get("title", ""),
+ "project_description": project_data.get("description", ""),
+ "project_instructions": complete_instructions or "",
+ "project_path": files.normalize_a0_path(get_project_folder(name)),
+ }
+
+
+def get_additional_instructions_files(name: str):
+ instructions_folder = files.get_abs_path(
+ get_project_folder(name), PROJECT_META_DIR, PROJECT_INSTRUCTIONS_DIR
+ )
+ return files.read_text_files_in_dir(instructions_folder)
+
+
+def get_context_project_name(context: "AgentContext") -> str | None:
+ return context.get_data(CONTEXT_DATA_KEY_PROJECT)
+
+
+def load_project_variables(name: str):
+ try:
+ abs_path = files.get_abs_path(get_project_meta_folder(name), "variables.env")
+ return files.read_file(abs_path)
+ except Exception:
+ return ""
+
+
+def save_project_variables(name: str, variables: str):
+ abs_path = files.get_abs_path(get_project_meta_folder(name), "variables.env")
+ files.write_file(abs_path, variables)
+
+
+def load_project_subagents(name: str) -> dict[str, SubAgentSettings]:
+ try:
+ abs_path = files.get_abs_path(get_project_meta_folder(name), "agents.json")
+ data = dirty_json.parse(files.read_file(abs_path))
+ if isinstance(data, dict):
+ return _normalize_subagents(data) # type: ignore[arg-type,return-value]
+ return {}
+ except Exception:
+ return {}
+
+
+def save_project_subagents(name: str, subagents_data: dict[str, SubAgentSettings]):
+ abs_path = files.get_abs_path(get_project_meta_folder(name), "agents.json")
+ normalized = _normalize_subagents(subagents_data)
+ content = dirty_json.stringify(normalized)
+ files.write_file(abs_path, content)
+
+
+def _normalize_subagents(
+ subagents_data: dict[str, SubAgentSettings]
+) -> dict[str, SubAgentSettings]:
+ from python.helpers import subagents
+
+ agents_dict = subagents.get_agents_dict()
+
+ normalized: dict[str, SubAgentSettings] = {}
+ for key, value in subagents_data.items():
+ agent = agents_dict.get(key)
+ if not agent:
+ continue
+
+ enabled = bool(value["enabled"])
+ if agent.enabled == enabled:
+ continue
+
+ normalized[key] = {"enabled": enabled}
+
+ return normalized
+
+
+def load_project_secrets_masked(name: str, merge_with_global=False):
+ from python.helpers import secrets
+
+ mgr = secrets.get_project_secrets_manager(name, merge_with_global)
+ return mgr.get_masked_secrets()
+
+
+def save_project_secrets(name: str, secrets: str):
+ from python.helpers.secrets import get_project_secrets_manager
+
+ secrets_manager = get_project_secrets_manager(name)
+ secrets_manager.save_secrets_with_merge(secrets)
+
+
+def get_context_memory_subdir(context: "AgentContext") -> str | None:
+ # if a project is active and has memory isolation set, return the project memory subdir
+ project_name = get_context_project_name(context)
+ if project_name:
+ project_data = load_basic_project_data(project_name)
+ if project_data["memory"] == "own":
+ return "projects/" + project_name
+ return None # no memory override
+
+
+def create_project_meta_folders(name: str):
+ # create instructions folder
+ files.create_dir(get_project_meta_folder(name, PROJECT_INSTRUCTIONS_DIR))
+
+ # create knowledge folders
+ files.create_dir(get_project_meta_folder(name, PROJECT_KNOWLEDGE_DIR))
+ from python.helpers import memory
+
+ for memory_type in memory.Memory.Area:
+ files.create_dir(
+ get_project_meta_folder(name, PROJECT_KNOWLEDGE_DIR, memory_type.value)
+ )
+
+
+def get_knowledge_files_count(name: str):
+ knowledge_folder = files.get_abs_path(
+ get_project_meta_folder(name, PROJECT_KNOWLEDGE_DIR)
+ )
+ return len(files.list_files_in_dir_recursively(knowledge_folder))
+
+def get_file_structure(name: str, basic_data: BasicProjectData|None=None) -> str:
+ project_folder = get_project_folder(name)
+ if basic_data is None:
+ basic_data = load_basic_project_data(name)
+
+ tree = str(file_tree.file_tree(
+ project_folder,
+ max_depth=basic_data["file_structure"]["max_depth"],
+ max_files=basic_data["file_structure"]["max_files"],
+ max_folders=basic_data["file_structure"]["max_folders"],
+ max_lines=basic_data["file_structure"]["max_lines"],
+ ignore=basic_data["file_structure"]["gitignore"],
+ output_mode=file_tree.OUTPUT_MODE_STRING
+ ))
+
+ # empty?
+ if "\n" not in tree:
+ tree += "\n # Empty"
+
+ return tree
+
+
\ No newline at end of file
diff --git a/python/helpers/providers.py b/python/helpers/providers.py
index cd139e88aa..f60238bd56 100644
--- a/python/helpers/providers.py
+++ b/python/helpers/providers.py
@@ -1,7 +1,8 @@
import yaml
from python.helpers import files
-from typing import List, Dict, Optional, TypedDict
+from typing import List, Dict, Optional, TypedDict, Literal
+ModelType = Literal["chat", "embedding"]
# Type alias for UI option items
class FieldOption(TypedDict):
@@ -68,16 +69,15 @@ def _load_providers(self):
opts.append({"value": pid, "label": name})
self._options[p_type] = opts
- def get_providers(self, provider_type: str) -> List[FieldOption]:
+ def get_providers(self, provider_type: ModelType) -> List[FieldOption]:
"""Returns a list of providers for a given type (e.g., 'chat', 'embedding')."""
return self._options.get(provider_type, []) if self._options else []
-
- def get_raw_providers(self, provider_type: str) -> List[Dict[str, str]]:
+ def get_raw_providers(self, provider_type: ModelType) -> List[Dict[str, str]]:
"""Return raw provider dictionaries for advanced use-cases."""
return self._raw.get(provider_type, []) if self._raw else []
- def get_provider_config(self, provider_type: str, provider_id: str) -> Optional[Dict[str, str]]:
+ def get_provider_config(self, provider_type: ModelType, provider_id: str) -> Optional[Dict[str, str]]:
"""Return the metadata dict for a single provider id (case-insensitive)."""
provider_id_low = provider_id.lower()
for p in self.get_raw_providers(provider_type):
@@ -86,16 +86,16 @@ def get_provider_config(self, provider_type: str, provider_id: str) -> Optional[
return None
-def get_providers(provider_type: str) -> List[FieldOption]:
+def get_providers(provider_type: ModelType) -> List[FieldOption]:
"""Convenience function to get providers of a specific type."""
return ProviderManager.get_instance().get_providers(provider_type)
-def get_raw_providers(provider_type: str) -> List[Dict[str, str]]:
+def get_raw_providers(provider_type: ModelType) -> List[Dict[str, str]]:
"""Return full metadata for providers of a given type."""
return ProviderManager.get_instance().get_raw_providers(provider_type)
-def get_provider_config(provider_type: str, provider_id: str) -> Optional[Dict[str, str]]:
+def get_provider_config(provider_type: ModelType, provider_id: str) -> Optional[Dict[str, str]]:
"""Return metadata for a single provider (None if not found)."""
- return ProviderManager.get_instance().get_provider_config(provider_type, provider_id)
\ No newline at end of file
+ return ProviderManager.get_instance().get_provider_config(provider_type, provider_id)
\ No newline at end of file
diff --git a/python/helpers/runtime.py b/python/helpers/runtime.py
index d36a9958c0..2120b36cf4 100644
--- a/python/helpers/runtime.py
+++ b/python/helpers/runtime.py
@@ -1,14 +1,16 @@
import argparse
import inspect
import secrets
+from pathlib import Path
from typing import TypeVar, Callable, Awaitable, Union, overload, cast
from python.helpers import dotenv, rfc, settings, files
import asyncio
import threading
import queue
+import sys
-T = TypeVar('T')
-R = TypeVar('R')
+T = TypeVar("T")
+R = TypeVar("R")
parser = argparse.ArgumentParser()
args = {}
@@ -40,31 +42,38 @@ def initialize():
key = key.lstrip("-")
args[key] = value
+
def get_arg(name: str):
global args
return args.get(name, None)
+
def has_arg(name: str):
global args
return name in args
+
def is_dockerized() -> bool:
return bool(get_arg("dockerized"))
+
def is_development() -> bool:
return not is_dockerized()
+
def get_local_url():
if is_dockerized():
return "host.docker.internal"
return "127.0.0.1"
+
def get_runtime_id() -> str:
global runtime_id
if not runtime_id:
- runtime_id = secrets.token_hex(8)
+ runtime_id = secrets.token_hex(8)
return runtime_id
+
def get_persistent_id() -> str:
id = dotenv.get_dotenv_value("A0_PERSISTENT_RUNTIME_ID")
if not id:
@@ -72,17 +81,28 @@ def get_persistent_id() -> str:
dotenv.save_dotenv_value("A0_PERSISTENT_RUNTIME_ID", id)
return id
+
@overload
-async def call_development_function(func: Callable[..., Awaitable[T]], *args, **kwargs) -> T: ...
+async def call_development_function(
+ func: Callable[..., Awaitable[T]], *args, **kwargs
+) -> T: ...
+
@overload
async def call_development_function(func: Callable[..., T], *args, **kwargs) -> T: ...
-async def call_development_function(func: Union[Callable[..., T], Callable[..., Awaitable[T]]], *args, **kwargs) -> T:
+
+async def call_development_function(
+ func: Union[Callable[..., T], Callable[..., Awaitable[T]]], *args, **kwargs
+) -> T:
if is_development():
url = _get_rfc_url()
password = _get_rfc_password()
- module = files.deabsolute_path(func.__code__.co_filename).replace("/", ".").removesuffix(".py") # __module__ is not reliable
+ # Normalize path components to build a valid Python module path across OSes
+ module_path = Path(
+ files.deabsolute_path(func.__code__.co_filename)
+ ).with_suffix("")
+ module = ".".join(module_path.parts) # __module__ is not reliable
result = await rfc.call_rfc(
url=url,
password=password,
@@ -96,7 +116,7 @@ async def call_development_function(func: Union[Callable[..., T], Callable[...,
if inspect.iscoroutinefunction(func):
return await func(*args, **kwargs)
else:
- return func(*args, **kwargs) # type: ignore
+ return func(*args, **kwargs) # type: ignore
async def handle_rfc(rfc_call: rfc.RFCCall):
@@ -114,45 +134,61 @@ def _get_rfc_url() -> str:
set = settings.get_settings()
url = set["rfc_url"]
if not "://" in url:
- url = "http://"+url
+ url = "http://" + url
if url.endswith("/"):
url = url[:-1]
- url = url+":"+str(set["rfc_port_http"])
+ url = url + ":" + str(set["rfc_port_http"])
url += "/rfc"
return url
-def call_development_function_sync(func: Union[Callable[..., T], Callable[..., Awaitable[T]]], *args, **kwargs) -> T:
+def call_development_function_sync(
+ func: Union[Callable[..., T], Callable[..., Awaitable[T]]], *args, **kwargs
+) -> T:
# run async function in sync manner
result_queue = queue.Queue()
-
+
def run_in_thread():
result = asyncio.run(call_development_function(func, *args, **kwargs))
result_queue.put(result)
-
+
thread = threading.Thread(target=run_in_thread)
thread.start()
thread.join(timeout=30) # wait for thread with timeout
-
+
if thread.is_alive():
raise TimeoutError("Function call timed out after 30 seconds")
-
+
result = result_queue.get_nowait()
return cast(T, result)
def get_web_ui_port():
web_ui_port = (
- get_arg("port")
- or int(dotenv.get_dotenv_value("WEB_UI_PORT", 0))
- or 5000
+ get_arg("port") or int(dotenv.get_dotenv_value("WEB_UI_PORT", 0)) or 5000
)
return web_ui_port
+
def get_tunnel_api_port():
tunnel_api_port = (
get_arg("tunnel_api_port")
or int(dotenv.get_dotenv_value("TUNNEL_API_PORT", 0))
or 55520
)
- return tunnel_api_port
\ No newline at end of file
+ return tunnel_api_port
+
+
+def get_platform():
+ return sys.platform
+
+
+def is_windows():
+ return get_platform() == "win32"
+
+
+def get_terminal_executable():
+ if is_windows():
+ return "powershell.exe"
+ else:
+ return "/bin/bash"
diff --git a/python/helpers/secrets.py b/python/helpers/secrets.py
index a4b58c51c1..53ee85d9b0 100644
--- a/python/helpers/secrets.py
+++ b/python/helpers/secrets.py
@@ -4,27 +4,32 @@
import os
from io import StringIO
from dataclasses import dataclass
-from typing import Dict, Optional, List, Literal, Set, Callable
+from typing import Dict, Optional, List, Literal, Set, Callable, Tuple, TYPE_CHECKING
from dotenv.parser import parse_stream
from python.helpers.errors import RepairableException
from python.helpers import files
+if TYPE_CHECKING:
+ from agent import AgentContext
+
# New alias-based placeholder format Β§Β§secret(KEY)
ALIAS_PATTERN = r"Β§Β§secret\(([A-Za-z_][A-Za-z0-9_]*)\)"
+DEFAULT_SECRETS_FILE = "tmp/secrets.env"
+
def alias_for_key(key: str, placeholder: str = "Β§Β§secret({key})") -> str:
# Return alias string for given key in upper-case
key = key.upper()
return placeholder.format(key=key)
+
@dataclass
class EnvLine:
raw: str
type: Literal["pair", "comment", "blank", "other"]
key: Optional[str] = None
value: Optional[str] = None
- key_part: Optional[str] = None # original left side including whitespace up to '='
inline_comment: Optional[str] = (
None # preserves trailing inline comment including leading spaces and '#'
)
@@ -118,44 +123,55 @@ def finalize(self) -> str:
class SecretsManager:
- SECRETS_FILE = "tmp/secrets.env"
PLACEHOLDER_PATTERN = ALIAS_PATTERN
MASK_VALUE = "***"
- _instance: Optional["SecretsManager"] = None
+ _instances: Dict[Tuple[str, ...], "SecretsManager"] = {}
_secrets_cache: Optional[Dict[str, str]] = None
_last_raw_text: Optional[str] = None
@classmethod
- def get_instance(cls) -> "SecretsManager":
- if cls._instance is None:
- cls._instance = cls()
- return cls._instance
-
- def __init__(self):
+ def get_instance(cls, *secrets_files: str) -> "SecretsManager":
+ if not secrets_files:
+ secrets_files = (DEFAULT_SECRETS_FILE,)
+ key = tuple(secrets_files)
+ if key not in cls._instances:
+ cls._instances[key] = cls(*secrets_files)
+ return cls._instances[key]
+
+ def __init__(self, *files: str):
self._lock = threading.RLock()
- # instance-level override for secrets file
- self._secrets_file_rel = self.SECRETS_FILE
-
- def set_secrets_file(self, relative_path: str):
- """Override the relative secrets file location (useful for tests)."""
- with self._lock:
- self._secrets_file_rel = relative_path
- self.clear_cache()
+ # instance-level list of secrets files
+ self._files: Tuple[str, ...] = tuple(files) if files else (DEFAULT_SECRETS_FILE,)
+ self._raw_snapshots: Dict[str, str] = {}
+ self._secrets_cache = None
+ self._last_raw_text = None
def read_secrets_raw(self) -> str:
"""Read raw secrets file content from local filesystem (same system)."""
- try:
- content = files.read_file(self._secrets_file_rel)
- self._last_raw_text = content
- return content
- except Exception:
- self._last_raw_text = ""
- return ""
+ parts: List[str] = []
+ self._raw_snapshots = {}
+
+ for path in self._files:
+ try:
+ content = files.read_file(path)
+ except Exception:
+ content = ""
+
+ self._raw_snapshots[path] = content
+ parts.append(content)
+
+ combined = "\n".join(parts)
+ self._last_raw_text = combined
+ return combined
def _write_secrets_raw(self, content: str):
"""Write raw secrets file content to local filesystem."""
- files.write_file(self._secrets_file_rel, content)
+ if len(self._files) != 1:
+ raise RuntimeError(
+ "Saving secrets content is only supported for a single secrets file"
+ )
+ files.write_file(self._files[0], content)
def load_secrets(self) -> Dict[str, str]:
"""Load secrets from file, return key-value dict"""
@@ -163,29 +179,27 @@ def load_secrets(self) -> Dict[str, str]:
if self._secrets_cache is not None:
return self._secrets_cache
- secrets: Dict[str, str] = {}
- try:
- content = self.read_secrets_raw()
- # keep raw snapshot for future save merge without reading again
- self._last_raw_text = content
- if content:
- secrets = self.parse_env_content(content)
- except Exception as e:
- # On unexpected failure, keep empty cache rather than crash
- secrets = {}
-
- self._secrets_cache = secrets
- return secrets
+ combined_raw = self.read_secrets_raw()
+ merged_secrets = (
+ self.parse_env_content(combined_raw) if combined_raw else {}
+ )
+
+ # Only track the first file's raw text for single-file setups
+ if len(self._files) != 1:
+ self._last_raw_text = None
+
+ self._secrets_cache = merged_secrets
+ return merged_secrets
def save_secrets(self, secrets_content: str):
"""Save secrets content to file and update cache"""
+ if len(self._files) != 1:
+ raise RuntimeError(
+ "Saving secrets is disabled when multiple files are configured"
+ )
with self._lock:
- # Ensure write to local filesystem (UTF-8)
self._write_secrets_raw(secrets_content)
- # Update cache
- self._secrets_cache = self.parse_env_content(secrets_content)
- # Update raw snapshot
- self._last_raw_text = secrets_content
+ self._invalidate_all_caches()
def save_secrets_with_merge(self, submitted_content: str):
"""Merge submitted content with existing file preserving comments, order and supporting deletion.
@@ -193,13 +207,19 @@ def save_secrets_with_merge(self, submitted_content: str):
- Keys present in existing but omitted from submitted are deleted.
- New keys with non-masked values are appended at the end.
"""
+ if len(self._files) != 1:
+ raise RuntimeError(
+ "Merging secrets is disabled when multiple files are configured"
+ )
with self._lock:
# Prefer in-memory snapshot to avoid disk reads during save
+ primary_path = self._files[0]
if self._last_raw_text is not None:
existing_text = self._last_raw_text
else:
try:
- existing_text = self.read_secrets_raw()
+ existing_text = files.read_file(primary_path)
+ self._raw_snapshots[primary_path] = existing_text
except Exception as e:
# If read fails and submitted contains masked values, abort to avoid losing values/comments
if self.MASK_VALUE in submitted_content:
@@ -210,7 +230,8 @@ def save_secrets_with_merge(self, submitted_content: str):
existing_text = ""
merged_lines = self._merge_env(existing_text, submitted_content)
merged_text = self._serialize_env_lines(merged_lines)
- self.save_secrets(merged_text)
+ self._write_secrets_raw(merged_text)
+ self._invalidate_all_caches()
def get_keys(self) -> List[str]:
"""Get list of secret keys"""
@@ -219,7 +240,7 @@ def get_keys(self) -> List[str]:
def get_secrets_for_prompt(self) -> str:
"""Get formatted string of secret keys for system prompt"""
- content = self._last_raw_text or self.read_secrets_raw()
+ content = self.read_secrets_raw()
if not content:
return ""
@@ -251,9 +272,7 @@ def replacer(match):
return secrets[key]
else:
available_keys = ", ".join(secrets.keys())
- error_msg = (
- f"Secret placeholder '{alias_for_key(key)}' not found in secrets store.\n"
- )
+ error_msg = f"Secret placeholder '{alias_for_key(key)}' not found in secrets store.\n"
error_msg += f"Available secrets: {available_keys}"
raise RepairableException(error_msg)
@@ -276,7 +295,9 @@ def change_placeholders(self, text: str, new_format: str) -> str:
return result
- def mask_values(self, text: str, min_length: int = 4, placeholder: str = "Β§Β§secret({key})") -> str:
+ def mask_values(
+ self, text: str, min_length: int = 4, placeholder: str = "Β§Β§secret({key})"
+ ) -> str:
"""Replace actual secret values with placeholders in text"""
if not text:
return text
@@ -295,18 +316,21 @@ def mask_values(self, text: str, min_length: int = 4, placeholder: str = "Β§Β§se
def get_masked_secrets(self) -> str:
"""Get content with values masked for frontend display (preserves comments and unrecognized lines)"""
- if not (content:=self.read_secrets_raw()):
+ content = self.read_secrets_raw()
+ if not content:
return ""
# Parse content for known keys using python-dotenv
secrets_map = self.parse_env_content(content)
env_lines = self.parse_env_lines(content)
+
# Replace values with mask for keys present
for ln in env_lines:
if ln.type == "pair" and ln.key is not None:
ln.key = ln.key.upper()
if ln.key in secrets_map and secrets_map[ln.key] != "":
ln.value = self.MASK_VALUE
+
return self._serialize_env_lines(env_lines)
def parse_env_content(self, content: str) -> Dict[str, str]:
@@ -325,6 +349,13 @@ def clear_cache(self):
"""Clear the secrets cache"""
with self._lock:
self._secrets_cache = None
+ self._raw_snapshots = {}
+ self._last_raw_text = None
+
+ @classmethod
+ def _invalidate_all_caches(cls):
+ for instance in cls._instances.values():
+ instance.clear_cache()
# ---------------- Internal helpers for parsing/merging ----------------
@@ -342,9 +373,7 @@ def parse_env_lines(self, content: str) -> List[EnvLine]:
# Fallback to composed key_part if original not available
if "=" in line_text:
left, right = line_text.split("=", 1)
- key_part = left
else:
- key_part = binding.key
right = ""
# Try to extract inline comment by scanning right side to comment start, respecting quotes
in_single = False
@@ -376,7 +405,6 @@ def parse_env_lines(self, content: str) -> List[EnvLine]:
type="pair",
key=binding.key,
value=binding.value or "",
- key_part=key_part,
inline_comment=inline_comment,
)
)
@@ -404,11 +432,15 @@ def _serialize_env_lines(
out: List[str] = []
for ln in lines:
if ln.type == "pair" and ln.key is not None:
- left_raw = ln.key_part if ln.key_part is not None else ln.key
+ left_raw = ln.key
left = left_raw.upper()
val = ln.value if ln.value is not None else ""
comment = ln.inline_comment or ""
- formatted_key = key_formatter(left) if key_formatter else f"{key_delimiter}{left}{key_delimiter}"
+ formatted_key = (
+ key_formatter(left)
+ if key_formatter
+ else f"{key_delimiter}{left}{key_delimiter}"
+ )
val_part = f'="{val}"' if with_values else ""
comment_part = f" {comment}" if with_comments and comment else ""
out.append(f"{formatted_key}{val_part}{comment_part}")
@@ -455,11 +487,10 @@ def _merge_env(self, existing_text: str, submitted_text: str) -> List[EnvLine]:
existing_val = existing_pairs[key].value or ""
merged.append(
EnvLine(
- raw=f"{(sub.key_part or key)}={existing_val}",
+ raw=f"{key}={existing_val}",
type="pair",
key=key,
value=existing_val,
- key_part=sub.key_part or key,
inline_comment=sub.inline_comment,
)
)
@@ -471,3 +502,40 @@ def _merge_env(self, existing_text: str, submitted_text: str) -> List[EnvLine]:
merged.append(sub)
return merged
+
+
+def get_secrets_manager(context: "AgentContext|None" = None) -> SecretsManager:
+ from python.helpers import projects
+
+ # default secrets file
+ secret_files = [DEFAULT_SECRETS_FILE]
+
+ # use AgentContext from contextvars if no context provided
+ if not context:
+ from agent import AgentContext
+ context = AgentContext.current()
+
+ # merged with project secrets if active
+ if context:
+ project = projects.get_context_project_name(context)
+ if project:
+ secret_files.append(files.get_abs_path(projects.get_project_meta_folder(project), "secrets.env"))
+
+ return SecretsManager.get_instance(*secret_files)
+
+def get_project_secrets_manager(project_name: str, merge_with_global: bool = False) -> SecretsManager:
+ from python.helpers import projects
+
+ # default secrets file
+ secret_files = []
+
+ if merge_with_global:
+ secret_files.append(DEFAULT_SECRETS_FILE)
+
+ # merged with project secrets if active
+ secret_files.append(files.get_abs_path(projects.get_project_meta_folder(project_name), "secrets.env"))
+
+ return SecretsManager.get_instance(*secret_files)
+
+def get_default_secrets_manager() -> SecretsManager:
+ return SecretsManager.get_instance()
\ No newline at end of file
diff --git a/python/helpers/settings.py b/python/helpers/settings.py
index 3c6d826b89..c5e8cd1883 100644
--- a/python/helpers/settings.py
+++ b/python/helpers/settings.py
@@ -4,17 +4,55 @@
import os
import re
import subprocess
-from typing import Any, Literal, TypedDict, cast
+from typing import Any, Literal, TypedDict, cast, TypeVar
import models
from python.helpers import runtime, whisper, defer, git
from . import files, dotenv
from python.helpers.print_style import PrintStyle
-from python.helpers.providers import get_providers
-from python.helpers.secrets import SecretsManager
+from python.helpers.providers import get_providers, FieldOption as ProvidersFO
+from python.helpers.secrets import get_default_secrets_manager
from python.helpers import dirty_json
+T = TypeVar("T")
+
+
+def get_default_value(name: str, value: T) -> T:
+ """
+ Load setting value from .env with A0_SET_ prefix, falling back to default.
+
+ Args:
+ name: Setting name (will be prefixed with A0_SET_)
+ value: Default value to use if env var not set
+
+ Returns:
+ Environment variable value (type-normalized) or default value
+ """
+ env_value = dotenv.get_dotenv_value(
+ f"A0_SET_{name}", dotenv.get_dotenv_value(f"A0_SET_{name.upper()}", None)
+ )
+
+ if env_value is None:
+ return value
+
+ # Normalize type to match value param type
+ try:
+ if isinstance(value, bool):
+ return env_value.strip().lower() in ("true", "1", "yes", "on") # type: ignore
+ elif isinstance(value, dict):
+ return json.loads(env_value.strip()) # type: ignore
+ elif isinstance(value, str):
+ return str(env_value).strip() # type: ignore
+ else:
+ return type(value)(env_value.strip()) # type: ignore
+ except (ValueError, TypeError, json.JSONDecodeError) as e:
+ PrintStyle(background_color="yellow", font_color="black").print(
+ f"Warning: Invalid value for A0_SET_{name}='{env_value}': {e}. Using default: {value}"
+ )
+ return value
+
+
class Settings(TypedDict):
version: str
@@ -87,7 +125,7 @@ class Settings(TypedDict):
rfc_port_http: int
rfc_port_ssh: int
- shell_interface: Literal['local','ssh']
+ shell_interface: Literal["local", "ssh"]
stt_model_size: str
stt_language: str
@@ -111,6 +149,9 @@ class Settings(TypedDict):
# LiteLLM global kwargs applied to all model calls
litellm_global_kwargs: dict[str, Any]
+ update_check_enabled: bool
+
+
class PartialSettings(Settings, total=False):
pass
@@ -152,8 +193,23 @@ class SettingsSection(TypedDict, total=False):
tab: str # Indicates which tab this section belongs to
+class ModelProvider(ProvidersFO):
+ pass
+
+
+class SettingsOutputAdditional(TypedDict):
+ chat_providers: list[ModelProvider]
+ embedding_providers: list[ModelProvider]
+ shell_interfaces: list[FieldOption]
+ agent_subdirs: list[FieldOption]
+ knowledge_subdirs: list[FieldOption]
+ stt_models: list[FieldOption]
+ is_dockerized: bool
+
+
class SettingsOutput(TypedDict):
- sections: list[SettingsSection]
+ settings: Settings
+ additional: SettingsOutputAdditional
PASSWORD_PLACEHOLDER = "****PSWD****"
@@ -162,825 +218,47 @@ class SettingsOutput(TypedDict):
SETTINGS_FILE = files.get_abs_path("tmp/settings.json")
_settings: Settings | None = None
+OptionT = TypeVar("OptionT", bound=FieldOption)
-def convert_out(settings: Settings) -> SettingsOutput:
- default_settings = get_default_settings()
-
- # main model section
- chat_model_fields: list[SettingsField] = []
- chat_model_fields.append(
- {
- "id": "chat_model_provider",
- "title": "Chat model provider",
- "description": "Select provider for main chat model used by Agent Zero",
- "type": "select",
- "value": settings["chat_model_provider"],
- "options": cast(list[FieldOption], get_providers("chat")),
- }
- )
- chat_model_fields.append(
- {
- "id": "chat_model_name",
- "title": "Chat model name",
- "description": "Exact name of model from selected provider",
- "type": "text",
- "value": settings["chat_model_name"],
- }
- )
-
- chat_model_fields.append(
- {
- "id": "chat_model_api_base",
- "title": "Chat model API base URL",
- "description": "API base URL for main chat model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
- "type": "text",
- "value": settings["chat_model_api_base"],
- }
- )
-
- chat_model_fields.append(
- {
- "id": "chat_model_ctx_length",
- "title": "Chat model context length",
- "description": "Maximum number of tokens in the context window for LLM. System prompt, chat history, RAG and response all count towards this limit.",
- "type": "number",
- "value": settings["chat_model_ctx_length"],
- }
- )
-
- chat_model_fields.append(
- {
- "id": "chat_model_ctx_history",
- "title": "Context window space for chat history",
- "description": "Portion of context window dedicated to chat history visible to the agent. Chat history will automatically be optimized to fit. Smaller size will result in shorter and more summarized history. The remaining space will be used for system prompt, RAG and response.",
- "type": "range",
- "min": 0.01,
- "max": 1,
- "step": 0.01,
- "value": settings["chat_model_ctx_history"],
- }
- )
-
- chat_model_fields.append(
- {
- "id": "chat_model_vision",
- "title": "Supports Vision",
- "description": "Models capable of Vision can for example natively see the content of image attachments.",
- "type": "switch",
- "value": settings["chat_model_vision"],
- }
- )
-
- chat_model_fields.append(
- {
- "id": "chat_model_rl_requests",
- "title": "Requests per minute limit",
- "description": "Limits the number of requests per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
- "type": "number",
- "value": settings["chat_model_rl_requests"],
- }
- )
-
- chat_model_fields.append(
- {
- "id": "chat_model_rl_input",
- "title": "Input tokens per minute limit",
- "description": "Limits the number of input tokens per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
- "type": "number",
- "value": settings["chat_model_rl_input"],
- }
- )
-
- chat_model_fields.append(
- {
- "id": "chat_model_rl_output",
- "title": "Output tokens per minute limit",
- "description": "Limits the number of output tokens per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
- "type": "number",
- "value": settings["chat_model_rl_output"],
- }
- )
- chat_model_fields.append(
- {
- "id": "chat_model_kwargs",
- "title": "Chat model additional parameters",
- "description": "Any other parameters supported by LiteLLM. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string.",
- "type": "textarea",
- "value": _dict_to_env(settings["chat_model_kwargs"]),
- }
- )
+def _ensure_option_present(
+ options: list[OptionT] | None, current_value: str | None
+) -> list[OptionT]:
+ """
+ Ensure the currently selected value exists in a dropdown options list.
+ If missing, inserts it at the front as {value: current_value, label: current_value}.
+ """
+ opts = list(options or [])
+ if not current_value:
+ return opts
+ for o in opts:
+ if o.get("value") == current_value:
+ return opts
+ opts.insert(0, cast(OptionT, {"value": current_value, "label": current_value}))
+ return opts
- chat_model_section: SettingsSection = {
- "id": "chat_model",
- "title": "Chat Model",
- "description": "Selection and settings for main chat model used by Agent Zero",
- "fields": chat_model_fields,
- "tab": "agent",
- }
-
- # main model section
- util_model_fields: list[SettingsField] = []
- util_model_fields.append(
- {
- "id": "util_model_provider",
- "title": "Utility model provider",
- "description": "Select provider for utility model used by the framework",
- "type": "select",
- "value": settings["util_model_provider"],
- "options": cast(list[FieldOption], get_providers("chat")),
- }
- )
- util_model_fields.append(
- {
- "id": "util_model_name",
- "title": "Utility model name",
- "description": "Exact name of model from selected provider",
- "type": "text",
- "value": settings["util_model_name"],
- }
- )
-
- util_model_fields.append(
- {
- "id": "util_model_api_base",
- "title": "Utility model API base URL",
- "description": "API base URL for utility model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
- "type": "text",
- "value": settings["util_model_api_base"],
- }
- )
- util_model_fields.append(
- {
- "id": "util_model_rl_requests",
- "title": "Requests per minute limit",
- "description": "Limits the number of requests per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
- "type": "number",
- "value": settings["util_model_rl_requests"],
- }
- )
-
- util_model_fields.append(
- {
- "id": "util_model_rl_input",
- "title": "Input tokens per minute limit",
- "description": "Limits the number of input tokens per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
- "type": "number",
- "value": settings["util_model_rl_input"],
- }
- )
-
- util_model_fields.append(
- {
- "id": "util_model_rl_output",
- "title": "Output tokens per minute limit",
- "description": "Limits the number of output tokens per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
- "type": "number",
- "value": settings["util_model_rl_output"],
- }
- )
-
- util_model_fields.append(
- {
- "id": "util_model_kwargs",
- "title": "Utility model additional parameters",
- "description": "Any other parameters supported by LiteLLM. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string.",
- "type": "textarea",
- "value": _dict_to_env(settings["util_model_kwargs"]),
- }
- )
-
- util_model_section: SettingsSection = {
- "id": "util_model",
- "title": "Utility model",
- "description": "Smaller, cheaper, faster model for handling utility tasks like organizing memory, preparing prompts, summarizing.",
- "fields": util_model_fields,
- "tab": "agent",
- }
-
- # embedding model section
- embed_model_fields: list[SettingsField] = []
- embed_model_fields.append(
- {
- "id": "embed_model_provider",
- "title": "Embedding model provider",
- "description": "Select provider for embedding model used by the framework",
- "type": "select",
- "value": settings["embed_model_provider"],
- "options": cast(list[FieldOption], get_providers("embedding")),
- }
- )
- embed_model_fields.append(
- {
- "id": "embed_model_name",
- "title": "Embedding model name",
- "description": "Exact name of model from selected provider",
- "type": "text",
- "value": settings["embed_model_name"],
- }
- )
-
- embed_model_fields.append(
- {
- "id": "embed_model_api_base",
- "title": "Embedding model API base URL",
- "description": "API base URL for embedding model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
- "type": "text",
- "value": settings["embed_model_api_base"],
- }
- )
-
- embed_model_fields.append(
- {
- "id": "embed_model_rl_requests",
- "title": "Requests per minute limit",
- "description": "Limits the number of requests per minute to the embedding model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
- "type": "number",
- "value": settings["embed_model_rl_requests"],
- }
- )
-
- embed_model_fields.append(
- {
- "id": "embed_model_rl_input",
- "title": "Input tokens per minute limit",
- "description": "Limits the number of input tokens per minute to the embedding model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
- "type": "number",
- "value": settings["embed_model_rl_input"],
- }
- )
-
- embed_model_fields.append(
- {
- "id": "embed_model_kwargs",
- "title": "Embedding model additional parameters",
- "description": "Any other parameters supported by LiteLLM. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string.",
- "type": "textarea",
- "value": _dict_to_env(settings["embed_model_kwargs"]),
- }
- )
-
- embed_model_section: SettingsSection = {
- "id": "embed_model",
- "title": "Embedding Model",
- "description": f"Settings for the embedding model used by Agent Zero.
β οΈ No need to change
The default HuggingFace model {default_settings['embed_model_name']} is preloaded and runs locally within the docker container and there's no need to change it unless you have a specific requirements for embedding.",
- "fields": embed_model_fields,
- "tab": "agent",
- }
-
- # embedding model section
- browser_model_fields: list[SettingsField] = []
- browser_model_fields.append(
- {
- "id": "browser_model_provider",
- "title": "Web Browser model provider",
- "description": "Select provider for web browser model used by browser-use framework",
- "type": "select",
- "value": settings["browser_model_provider"],
- "options": cast(list[FieldOption], get_providers("chat")),
- }
- )
- browser_model_fields.append(
- {
- "id": "browser_model_name",
- "title": "Web Browser model name",
- "description": "Exact name of model from selected provider",
- "type": "text",
- "value": settings["browser_model_name"],
- }
- )
-
- browser_model_fields.append(
- {
- "id": "browser_model_api_base",
- "title": "Web Browser model API base URL",
- "description": "API base URL for web browser model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
- "type": "text",
- "value": settings["browser_model_api_base"],
- }
- )
-
- browser_model_fields.append(
- {
- "id": "browser_model_vision",
- "title": "Use Vision",
- "description": "Models capable of Vision can use it to analyze web pages from screenshots. Increases quality but also token usage.",
- "type": "switch",
- "value": settings["browser_model_vision"],
- }
- )
-
- browser_model_fields.append(
- {
- "id": "browser_model_rl_requests",
- "title": "Web Browser model rate limit requests",
- "description": "Rate limit requests for web browser model.",
- "type": "number",
- "value": settings["browser_model_rl_requests"],
- }
- )
-
- browser_model_fields.append(
- {
- "id": "browser_model_rl_input",
- "title": "Web Browser model rate limit input",
- "description": "Rate limit input for web browser model.",
- "type": "number",
- "value": settings["browser_model_rl_input"],
- }
- )
-
- browser_model_fields.append(
- {
- "id": "browser_model_rl_output",
- "title": "Web Browser model rate limit output",
- "description": "Rate limit output for web browser model.",
- "type": "number",
- "value": settings["browser_model_rl_output"],
- }
- )
-
- browser_model_fields.append(
- {
- "id": "browser_model_kwargs",
- "title": "Web Browser model additional parameters",
- "description": "Any other parameters supported by LiteLLM. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string.",
- "type": "textarea",
- "value": _dict_to_env(settings["browser_model_kwargs"]),
- }
- )
-
- browser_model_fields.append(
- {
- "id": "browser_http_headers",
- "title": "HTTP Headers",
- "description": "HTTP headers to include with all browser requests. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string. Example: Authorization=Bearer token123",
- "type": "textarea",
- "value": _dict_to_env(settings.get("browser_http_headers", {})),
- }
- )
-
- browser_model_section: SettingsSection = {
- "id": "browser_model",
- "title": "Web Browser Model",
- "description": "Settings for the web browser model. Agent Zero uses browser-use agentic framework to handle web interactions.",
- "fields": browser_model_fields,
- "tab": "agent",
- }
-
- # basic auth section
- auth_fields: list[SettingsField] = []
-
- auth_fields.append(
- {
- "id": "auth_login",
- "title": "UI Login",
- "description": "Set user name for web UI",
- "type": "text",
- "value": dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN) or "",
- }
- )
-
- auth_fields.append(
- {
- "id": "auth_password",
- "title": "UI Password",
- "description": "Set user password for web UI",
- "type": "password",
- "value": (
- PASSWORD_PLACEHOLDER
- if dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD)
- else ""
- ),
- }
- )
-
- if runtime.is_dockerized():
- auth_fields.append(
- {
- "id": "root_password",
- "title": "root Password",
- "description": "Change linux root password in docker container. This password can be used for SSH access. Original password was randomly generated during setup.",
- "type": "password",
- "value": "",
- }
- )
-
- auth_section: SettingsSection = {
- "id": "auth",
- "title": "Authentication",
- "description": "Settings for authentication to use Agent Zero Web UI.",
- "fields": auth_fields,
- "tab": "external",
- }
-
- # api keys model section
- api_keys_fields: list[SettingsField] = []
-
- # Collect unique providers from both chat and embedding sections
- providers_seen: set[str] = set()
- for p_type in ("chat", "embedding"):
- for provider in get_providers(p_type):
- pid_lower = provider["value"].lower()
- if pid_lower in providers_seen:
- continue
- providers_seen.add(pid_lower)
- api_keys_fields.append(
- _get_api_key_field(settings, pid_lower, provider["label"])
- )
-
- api_keys_section: SettingsSection = {
- "id": "api_keys",
- "title": "API Keys",
- "description": "API keys for model providers and services used by Agent Zero. You can set multiple API keys separated by a comma (,). They will be used in round-robin fashion. For more information abou Agent Zero Venice provider, see Agent Zero Venice.",
- "fields": api_keys_fields,
- "tab": "external",
- }
-
- # LiteLLM global config section
- litellm_fields: list[SettingsField] = []
-
- litellm_fields.append(
- {
- "id": "litellm_global_kwargs",
- "title": "LiteLLM global parameters",
- "description": "Global LiteLLM params (e.g. timeout, stream_timeout) in .env format: one KEY=VALUE per line. Example: stream_timeout=30. Applied to all LiteLLM calls unless overridden. See LiteLLM and timeouts.",
- "type": "textarea",
- "value": _dict_to_env(settings["litellm_global_kwargs"]),
- "style": "height: 12em",
- }
- )
-
- litellm_section: SettingsSection = {
- "id": "litellm",
- "title": "LiteLLM Global Settings",
- "description": "Configure global parameters passed to LiteLLM for all providers.",
- "fields": litellm_fields,
- "tab": "external",
- }
-
- # Agent config section
- agent_fields: list[SettingsField] = []
-
- agent_fields.append(
- {
- "id": "agent_profile",
- "title": "Default agent profile",
- "description": "Subdirectory of /agents folder to be used by default agent no. 0. Subordinate agents can be spawned with other profiles, that is on their superior agent to decide. This setting affects the behaviour of the top level agent you communicate with.",
- "type": "select",
- "value": settings["agent_profile"],
- "options": [
+def convert_out(settings: Settings) -> SettingsOutput:
+ out = SettingsOutput(
+ settings=settings.copy(),
+ additional=SettingsOutputAdditional(
+ chat_providers=get_providers("chat"),
+ embedding_providers=get_providers("embedding"),
+ shell_interfaces=[
+ {"value": "local", "label": "Local Python TTY"},
+ {"value": "ssh", "label": "SSH"},
+ ],
+ is_dockerized=runtime.is_dockerized(),
+ agent_subdirs=[
{"value": subdir, "label": subdir}
for subdir in files.get_subdirectories("agents")
if subdir != "_example"
],
- }
- )
-
- agent_fields.append(
- {
- "id": "agent_knowledge_subdir",
- "title": "Knowledge subdirectory",
- "description": "Subdirectory of /knowledge folder to use for agent knowledge import. 'default' subfolder is always imported and contains framework knowledge.",
- "type": "select",
- "value": settings["agent_knowledge_subdir"],
- "options": [
+ knowledge_subdirs=[
{"value": subdir, "label": subdir}
for subdir in files.get_subdirectories("knowledge", exclude="default")
],
- }
- )
-
- agent_section: SettingsSection = {
- "id": "agent",
- "title": "Agent Config",
- "description": "Agent parameters.",
- "fields": agent_fields,
- "tab": "agent",
- }
-
- memory_fields: list[SettingsField] = []
-
- memory_fields.append(
- {
- "id": "agent_memory_subdir",
- "title": "Memory Subdirectory",
- "description": "Subdirectory of /memory folder to use for agent memory storage. Used to separate memory storage between different instances.",
- "type": "text",
- "value": settings["agent_memory_subdir"],
- # "options": [
- # {"value": subdir, "label": subdir}
- # for subdir in files.get_subdirectories("memory", exclude="embeddings")
- # ],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_dashboard",
- "title": "Memory Dashboard",
- "description": "View and explore all stored memories in a table format with filtering and search capabilities.",
- "type": "button",
- "value": "Open Dashboard",
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_enabled",
- "title": "Memory auto-recall enabled",
- "description": "Agent Zero will automatically recall memories based on convesation context.",
- "type": "switch",
- "value": settings["memory_recall_enabled"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_delayed",
- "title": "Memory auto-recall delayed",
- "description": "The agent will not wait for auto memory recall. Memories will be delivered one message later. This speeds up agent's response time but may result in less relevant first step.",
- "type": "switch",
- "value": settings["memory_recall_delayed"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_query_prep",
- "title": "Auto-recall AI query preparation",
- "description": "Enables vector DB query preparation from conversation context by utility LLM for auto-recall. Improves search quality, adds 1 utility LLM call per auto-recall.",
- "type": "switch",
- "value": settings["memory_recall_query_prep"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_post_filter",
- "title": "Auto-recall AI post-filtering",
- "description": "Enables memory relevance filtering by utility LLM for auto-recall. Improves search quality, adds 1 utility LLM call per auto-recall.",
- "type": "switch",
- "value": settings["memory_recall_post_filter"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_interval",
- "title": "Memory auto-recall interval",
- "description": "Memories are recalled after every user or superior agent message. During agent's monologue, memories are recalled every X turns based on this parameter.",
- "type": "range",
- "min": 1,
- "max": 10,
- "step": 1,
- "value": settings["memory_recall_interval"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_history_len",
- "title": "Memory auto-recall history length",
- "description": "The length of conversation history passed to memory recall LLM for context (in characters).",
- "type": "number",
- "value": settings["memory_recall_history_len"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_similarity_threshold",
- "title": "Memory auto-recall similarity threshold",
- "description": "The threshold for similarity search in memory recall (0 = no similarity, 1 = exact match).",
- "type": "range",
- "min": 0,
- "max": 1,
- "step": 0.01,
- "value": settings["memory_recall_similarity_threshold"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_memories_max_search",
- "title": "Memory auto-recall max memories to search",
- "description": "The maximum number of memories returned by vector DB for further processing.",
- "type": "number",
- "value": settings["memory_recall_memories_max_search"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_memories_max_result",
- "title": "Memory auto-recall max memories to use",
- "description": "The maximum number of memories to inject into A0's context window.",
- "type": "number",
- "value": settings["memory_recall_memories_max_result"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_solutions_max_search",
- "title": "Memory auto-recall max solutions to search",
- "description": "The maximum number of solutions returned by vector DB for further processing.",
- "type": "number",
- "value": settings["memory_recall_solutions_max_search"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_recall_solutions_max_result",
- "title": "Memory auto-recall max solutions to use",
- "description": "The maximum number of solutions to inject into A0's context window.",
- "type": "number",
- "value": settings["memory_recall_solutions_max_result"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_memorize_enabled",
- "title": "Auto-memorize enabled",
- "description": "A0 will automatically memorize facts and solutions from conversation history.",
- "type": "switch",
- "value": settings["memory_memorize_enabled"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_memorize_consolidation",
- "title": "Auto-memorize AI consolidation",
- "description": "A0 will automatically consolidate similar memories using utility LLM. Improves memory quality over time, adds 2 utility LLM calls per memory.",
- "type": "switch",
- "value": settings["memory_memorize_consolidation"],
- }
- )
-
- memory_fields.append(
- {
- "id": "memory_memorize_replace_threshold",
- "title": "Auto-memorize replacement threshold",
- "description": "Only applies when AI consolidation is disabled. Replaces previous similar memories with new ones based on this threshold. 0 = replace even if not similar at all, 1 = replace only if exact match.",
- "type": "range",
- "min": 0,
- "max": 1,
- "step": 0.01,
- "value": settings["memory_memorize_replace_threshold"],
- }
- )
-
- memory_section: SettingsSection = {
- "id": "memory",
- "title": "Memory",
- "description": "Configuration of A0's memory system. A0 memorizes and recalls memories automatically to help it's context awareness.",
- "fields": memory_fields,
- "tab": "agent",
- }
-
- dev_fields: list[SettingsField] = []
-
- dev_fields.append(
- {
- "id": "shell_interface",
- "title": "Shell Interface",
- "description": "Terminal interface used for Code Execution Tool. Local Python TTY works locally in both dockerized and development environments. SSH always connects to dockerized environment (automatically at localhost or RFC host address).",
- "type": "select",
- "value": settings["shell_interface"],
- "options": [{"value": "local", "label": "Local Python TTY"}, {"value": "ssh", "label": "SSH"}],
- }
- )
-
- if runtime.is_development():
- # dev_fields.append(
- # {
- # "id": "rfc_auto_docker",
- # "title": "RFC Auto Docker Management",
- # "description": "Automatically create dockerized instance of A0 for RFCs using this instance's code base and, settings and .env.",
- # "type": "text",
- # "value": settings["rfc_auto_docker"],
- # }
- # )
-
- dev_fields.append(
- {
- "id": "rfc_url",
- "title": "RFC Destination URL",
- "description": "URL of dockerized A0 instance for remote function calls. Do not specify port here.",
- "type": "text",
- "value": settings["rfc_url"],
- }
- )
-
- dev_fields.append(
- {
- "id": "rfc_password",
- "title": "RFC Password",
- "description": "Password for remote function calls. Passwords must match on both instances. RFCs can not be used with empty password.",
- "type": "password",
- "value": (
- PASSWORD_PLACEHOLDER
- if dotenv.get_dotenv_value(dotenv.KEY_RFC_PASSWORD)
- else ""
- ),
- }
- )
-
- if runtime.is_development():
- dev_fields.append(
- {
- "id": "rfc_port_http",
- "title": "RFC HTTP port",
- "description": "HTTP port for dockerized instance of A0.",
- "type": "text",
- "value": settings["rfc_port_http"],
- }
- )
-
- dev_fields.append(
- {
- "id": "rfc_port_ssh",
- "title": "RFC SSH port",
- "description": "SSH port for dockerized instance of A0.",
- "type": "text",
- "value": settings["rfc_port_ssh"],
- }
- )
-
- dev_section: SettingsSection = {
- "id": "dev",
- "title": "Development",
- "description": "Parameters for A0 framework development. RFCs (remote function calls) are used to call functions on another A0 instance. You can develop and debug A0 natively on your local system while redirecting some functions to A0 instance in docker. This is crucial for development as A0 needs to run in standardized environment to support all features.",
- "fields": dev_fields,
- "tab": "developer",
- }
-
- # code_exec_fields: list[SettingsField] = []
-
- # code_exec_fields.append(
- # {
- # "id": "code_exec_ssh_enabled",
- # "title": "Use SSH for code execution",
- # "description": "Code execution will use SSH to connect to the terminal. When disabled, a local python terminal interface is used instead. SSH should only be used in development environment or when encountering issues with the local python terminal interface.",
- # "type": "switch",
- # "value": settings["code_exec_ssh_enabled"],
- # }
- # )
-
- # code_exec_fields.append(
- # {
- # "id": "code_exec_ssh_addr",
- # "title": "Code execution SSH address",
- # "description": "Address of the SSH server for code execution. Only applies when SSH is enabled.",
- # "type": "text",
- # "value": settings["code_exec_ssh_addr"],
- # }
- # )
-
- # code_exec_fields.append(
- # {
- # "id": "code_exec_ssh_port",
- # "title": "Code execution SSH port",
- # "description": "Port of the SSH server for code execution. Only applies when SSH is enabled.",
- # "type": "text",
- # "value": settings["code_exec_ssh_port"],
- # }
- # )
-
- # code_exec_section: SettingsSection = {
- # "id": "code_exec",
- # "title": "Code execution",
- # "description": "Configuration of code execution by the agent.",
- # "fields": code_exec_fields,
- # "tab": "developer",
- # }
-
- # Speech to text section
- stt_fields: list[SettingsField] = []
-
- stt_fields.append(
- {
- "id": "stt_microphone_section",
- "title": "Microphone device",
- "description": "Select the microphone device to use for speech-to-text.",
- "value": "",
- "type": "html",
- }
- )
-
- stt_fields.append(
- {
- "id": "stt_model_size",
- "title": "Speech-to-text model size",
- "description": "Select the speech-to-text model size",
- "type": "select",
- "value": settings["stt_model_size"],
- "options": [
+ stt_models=[
{"value": "tiny", "label": "Tiny (39M, English)"},
{"value": "base", "label": "Base (74M, English)"},
{"value": "small", "label": "Small (244M, English)"},
@@ -988,293 +266,86 @@ def convert_out(settings: Settings) -> SettingsOutput:
{"value": "large", "label": "Large (1.5B, Multilingual)"},
{"value": "turbo", "label": "Turbo (Multilingual)"},
],
- }
+ ),
)
- stt_fields.append(
- {
- "id": "stt_language",
- "title": "Speech-to-text language code",
- "description": "Language code (e.g. en, fr, it)",
- "type": "text",
- "value": settings["stt_language"],
- }
- )
+ # ensure dropdown options include currently selected values
+ additional = out["additional"]
+ current = out["settings"]
- stt_fields.append(
- {
- "id": "stt_silence_threshold",
- "title": "Microphone silence threshold",
- "description": "Silence detection threshold. Lower values are more sensitive to noise.",
- "type": "range",
- "min": 0,
- "max": 1,
- "step": 0.01,
- "value": settings["stt_silence_threshold"],
- }
+ additional["chat_providers"] = _ensure_option_present(
+ additional.get("chat_providers"), current.get("chat_model_provider")
)
-
- stt_fields.append(
- {
- "id": "stt_silence_duration",
- "title": "Microphone silence duration (ms)",
- "description": "Duration of silence before the system considers speaking to have ended.",
- "type": "text",
- "value": settings["stt_silence_duration"],
- }
+ additional["chat_providers"] = _ensure_option_present(
+ additional.get("chat_providers"), current.get("util_model_provider")
)
-
- stt_fields.append(
- {
- "id": "stt_waiting_timeout",
- "title": "Microphone waiting timeout (ms)",
- "description": "Duration of silence before the system closes the microphone.",
- "type": "text",
- "value": settings["stt_waiting_timeout"],
- }
+ additional["chat_providers"] = _ensure_option_present(
+ additional.get("chat_providers"), current.get("browser_model_provider")
)
-
- # TTS fields
- tts_fields: list[SettingsField] = []
-
- tts_fields.append(
- {
- "id": "tts_kokoro",
- "title": "Enable Kokoro TTS",
- "description": "Enable higher quality server-side AI (Kokoro) instead of browser-based text-to-speech.",
- "type": "switch",
- "value": settings["tts_kokoro"],
- }
+ additional["embedding_providers"] = _ensure_option_present(
+ additional.get("embedding_providers"), current.get("embed_model_provider")
)
-
- speech_section: SettingsSection = {
- "id": "speech",
- "title": "Speech",
- "description": "Voice transcription and speech synthesis settings.",
- "fields": stt_fields + tts_fields,
- "tab": "agent",
- }
-
- # MCP section
- mcp_client_fields: list[SettingsField] = []
-
- mcp_client_fields.append(
- {
- "id": "mcp_servers_config",
- "title": "MCP Servers Configuration",
- "description": "External MCP servers can be configured here.",
- "type": "button",
- "value": "Open",
- }
+ additional["shell_interfaces"] = _ensure_option_present(
+ additional.get("shell_interfaces"), current.get("shell_interface")
)
-
- mcp_client_fields.append(
- {
- "id": "mcp_servers",
- "title": "MCP Servers",
- "description": "(JSON list of) >> RemoteServer <<: [name, url, headers, timeout (opt), sse_read_timeout (opt), disabled (opt)] / >> Local Server <<: [name, command, args, env, encoding (opt), encoding_error_handler (opt), disabled (opt)]",
- "type": "textarea",
- "value": settings["mcp_servers"],
- "hidden": True,
- }
+ additional["agent_subdirs"] = _ensure_option_present(
+ additional.get("agent_subdirs"), current.get("agent_profile")
)
-
- mcp_client_fields.append(
- {
- "id": "mcp_client_init_timeout",
- "title": "MCP Client Init Timeout",
- "description": "Timeout for MCP client initialization (in seconds). Higher values might be required for complex MCPs, but might also slowdown system startup.",
- "type": "number",
- "value": settings["mcp_client_init_timeout"],
- }
+ additional["knowledge_subdirs"] = _ensure_option_present(
+ additional.get("knowledge_subdirs"), current.get("agent_knowledge_subdir")
)
-
- mcp_client_fields.append(
- {
- "id": "mcp_client_tool_timeout",
- "title": "MCP Client Tool Timeout",
- "description": "Timeout for MCP client tool execution. Higher values might be required for complex tools, but might also result in long responses with failing tools.",
- "type": "number",
- "value": settings["mcp_client_tool_timeout"],
- }
- )
-
- mcp_client_section: SettingsSection = {
- "id": "mcp_client",
- "title": "External MCP Servers",
- "description": "Agent Zero can use external MCP servers, local or remote as tools.",
- "fields": mcp_client_fields,
- "tab": "mcp",
- }
-
- # Secrets section
- secrets_fields: list[SettingsField] = []
-
- secrets_manager = SecretsManager.get_instance()
- try:
- secrets = secrets_manager.get_masked_secrets()
- except Exception:
- secrets = ""
-
- secrets_fields.append({
- "id": "variables",
- "title": "Variables Store",
- "description": "Store non-sensitive variables in .env format e.g. EMAIL_IMAP_SERVER=\"imap.gmail.com\", one item per line. You can use comments starting with # to add descriptions for the agent. See example. These variables are visible to LLMs and in chat history, they are not being masked.",
- "type": "textarea",
- "value": settings["variables"].strip(),
- "style": "height: 20em",
- })
-
- secrets_fields.append({
- "id": "secrets",
- "title": "Secrets Store",
- "description": "Store secrets and credentials in .env format e.g. EMAIL_PASSWORD=\"s3cret-p4$$w0rd\", one item per line. You can use comments starting with # to add descriptions for the agent. See example. These variables are not visile to LLMs and in chat history, they are being masked. β οΈ only values with length >= 4 are being masked to prevent false positives. ",
- "type": "textarea",
- "value": secrets,
- "style": "height: 20em",
- })
-
- secrets_section: SettingsSection = {
- "id": "secrets",
- "title": "Secrets Management",
- "description": "Manage secrets and credentials that agents can use without exposing values to LLMs, chat history or logs. Placeholders are automatically replaced with values just before tool calls. If bare passwords occur in tool results, they are masked back to placeholders.",
- "fields": secrets_fields,
- "tab": "external",
- }
-
- mcp_server_fields: list[SettingsField] = []
-
- mcp_server_fields.append(
- {
- "id": "mcp_server_enabled",
- "title": "Enable A0 MCP Server",
- "description": "Expose Agent Zero as an SSE/HTTP MCP server. This will make this A0 instance available to MCP clients.",
- "type": "switch",
- "value": settings["mcp_server_enabled"],
- }
- )
-
- mcp_server_fields.append(
- {
- "id": "mcp_server_token",
- "title": "MCP Server Token",
- "description": "Token for MCP server authentication.",
- "type": "text",
- "hidden": True,
- "value": settings["mcp_server_token"],
- }
- )
-
- mcp_server_section: SettingsSection = {
- "id": "mcp_server",
- "title": "A0 MCP Server",
- "description": "Agent Zero can be exposed as an SSE MCP server. See connection example.",
- "fields": mcp_server_fields,
- "tab": "mcp",
- }
-
- # -------- A2A Section --------
- a2a_fields: list[SettingsField] = []
-
- a2a_fields.append(
- {
- "id": "a2a_server_enabled",
- "title": "Enable A2A server",
- "description": "Expose Agent Zero as A2A server. This allows other agents to connect to A0 via A2A protocol.",
- "type": "switch",
- "value": settings["a2a_server_enabled"],
- }
+ additional["stt_models"] = _ensure_option_present(
+ additional.get("stt_models"), current.get("stt_model_size")
)
- a2a_section: SettingsSection = {
- "id": "a2a_server",
- "title": "A0 A2A Server",
- "description": "Agent Zero can be exposed as an A2A server. See connection example.",
- "fields": a2a_fields,
- "tab": "mcp",
- }
-
-
- # External API section
- external_api_fields: list[SettingsField] = []
+    # mask api keys: replace stored values with a placeholder before returning
+ providers = get_providers("chat") + get_providers("embedding")
+ for provider in providers:
+ provider_name = provider["value"]
+ api_key = settings["api_keys"].get(
+ provider_name, models.get_api_key(provider_name)
+ )
+ settings["api_keys"][provider_name] = (
+ API_KEY_PLACEHOLDER if api_key and api_key != "None" else ""
+ )
- external_api_fields.append(
- {
- "id": "external_api_examples",
- "title": "API Examples",
- "description": "View examples for using Agent Zero's external API endpoints with API key authentication.",
- "type": "button",
- "value": "Show API Examples",
- }
+ # load auth from dotenv
+ out["settings"]["auth_login"] = dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN) or ""
+ out["settings"]["auth_password"] = (
+ PASSWORD_PLACEHOLDER
+ if dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD)
+ else ""
)
-
- external_api_section: SettingsSection = {
- "id": "external_api",
- "title": "External API",
- "description": "Agent Zero provides external API endpoints for integration with other applications. "
- "These endpoints use API key authentication and support text messages and file attachments.",
- "fields": external_api_fields,
- "tab": "external",
- }
-
- # Backup & Restore section
- backup_fields: list[SettingsField] = []
-
- backup_fields.append(
- {
- "id": "backup_create",
- "title": "Create Backup",
- "description": "Create a backup archive of selected files and configurations "
- "using customizable patterns.",
- "type": "button",
- "value": "Create Backup",
- }
+ out["settings"]["rfc_password"] = (
+ PASSWORD_PLACEHOLDER if dotenv.get_dotenv_value(dotenv.KEY_RFC_PASSWORD) else ""
)
-
- backup_fields.append(
- {
- "id": "backup_restore",
- "title": "Restore from Backup",
- "description": "Restore files and configurations from a backup archive "
- "with pattern-based selection.",
- "type": "button",
- "value": "Restore Backup",
- }
+ out["settings"]["root_password"] = (
+ PASSWORD_PLACEHOLDER
+ if dotenv.get_dotenv_value(dotenv.KEY_ROOT_PASSWORD)
+ else ""
)
- backup_section: SettingsSection = {
- "id": "backup_restore",
- "title": "Backup & Restore",
- "description": "Backup and restore Agent Zero data and configurations "
- "using glob pattern-based file selection.",
- "fields": backup_fields,
- "tab": "backup",
- }
-
- # Add the section to the result
- result: SettingsOutput = {
- "sections": [
- agent_section,
- chat_model_section,
- util_model_section,
- browser_model_section,
- embed_model_section,
- memory_section,
- speech_section,
- api_keys_section,
- litellm_section,
- secrets_section,
- auth_section,
- mcp_client_section,
- mcp_server_section,
- a2a_section,
- external_api_section,
- backup_section,
- dev_section,
- # code_exec_section,
- ]
- }
- return result
+ # secrets
+ secrets_manager = get_default_secrets_manager()
+ try:
+ out["settings"]["secrets"] = secrets_manager.get_masked_secrets()
+ except Exception:
+ out["settings"]["secrets"] = ""
+
+ # mask API keys before sending to frontend
+ if isinstance(out["settings"].get("api_keys"), dict):
+ for provider, value in list(out["settings"]["api_keys"].items()):
+ if value:
+ out["settings"]["api_keys"][provider] = API_KEY_PLACEHOLDER
+
+ # normalize certain fields
+ for key, value in list(out["settings"].items()):
+ # convert kwargs dicts to .env format
+ if (key.endswith("_kwargs") or key == "browser_http_headers") and isinstance(
+ value, dict
+ ):
+ out["settings"][key] = _dict_to_env(value)
+ return out
def _get_api_key_field(settings: Settings, provider: str, title: str) -> SettingsField:
@@ -1288,27 +359,21 @@ def _get_api_key_field(settings: Settings, provider: str, title: str) -> Setting
}
-def convert_in(settings: dict) -> Settings:
+def convert_in(settings: Settings) -> Settings:
current = get_settings()
- for section in settings["sections"]:
- if "fields" in section:
- for field in section["fields"]:
- # Skip saving if value is a placeholder
- should_skip = (
- field["value"] == PASSWORD_PLACEHOLDER or
- field["value"] == API_KEY_PLACEHOLDER
- )
- if not should_skip:
- # Special handling for browser_http_headers
- if field["id"] == "browser_http_headers" or field["id"].endswith("_kwargs"):
- current[field["id"]] = _env_to_dict(field["value"])
- elif field["id"].startswith("api_key_"):
- current["api_keys"][field["id"]] = field["value"]
- else:
- current[field["id"]] = field["value"]
+ for key, value in settings.items():
+ # Special handling for browser_http_headers and *_kwargs (stored as .env text)
+ if (key == "browser_http_headers" or key.endswith("_kwargs")) and isinstance(
+ value, str
+ ):
+ current[key] = _env_to_dict(value)
+ continue
+
+ current[key] = value
return current
+
def get_settings() -> Settings:
global _settings
if not _settings:
@@ -1326,12 +391,19 @@ def set_settings(settings: Settings, apply: bool = True):
_write_settings_file(_settings)
if apply:
_apply_settings(previous)
+ return _settings
def set_settings_delta(delta: dict, apply: bool = True):
current = get_settings()
new = {**current, **delta}
- set_settings(new, apply) # type: ignore
+ return set_settings(new, apply) # type: ignore
+
+
+def merge_settings(original: Settings, delta: dict) -> Settings:
+ merged = original.copy()
+ merged.update(delta)
+ return merged
def normalize_settings(settings: Settings) -> Settings:
@@ -1403,106 +475,129 @@ def _remove_sensitive_settings(settings: Settings):
def _write_sensitive_settings(settings: Settings):
for key, val in settings["api_keys"].items():
- dotenv.save_dotenv_value(key.upper(), val)
+ if val != API_KEY_PLACEHOLDER:
+ dotenv.save_dotenv_value(key.upper(), val)
dotenv.save_dotenv_value(dotenv.KEY_AUTH_LOGIN, settings["auth_login"])
- if settings["auth_password"]:
+ if settings["auth_password"] != PASSWORD_PLACEHOLDER:
dotenv.save_dotenv_value(dotenv.KEY_AUTH_PASSWORD, settings["auth_password"])
- if settings["rfc_password"]:
+ if settings["rfc_password"] != PASSWORD_PLACEHOLDER:
dotenv.save_dotenv_value(dotenv.KEY_RFC_PASSWORD, settings["rfc_password"])
-
- if settings["root_password"]:
- dotenv.save_dotenv_value(dotenv.KEY_ROOT_PASSWORD, settings["root_password"])
- if settings["root_password"]:
- set_root_password(settings["root_password"])
+ if settings["root_password"] != PASSWORD_PLACEHOLDER:
+ if runtime.is_dockerized():
+ dotenv.save_dotenv_value(
+ dotenv.KEY_ROOT_PASSWORD, settings["root_password"]
+ )
+ set_root_password(settings["root_password"])
# Handle secrets separately - merge with existing preserving comments/order and support deletions
- secrets_manager = SecretsManager.get_instance()
+ secrets_manager = get_default_secrets_manager()
submitted_content = settings["secrets"]
secrets_manager.save_secrets_with_merge(submitted_content)
- secrets_manager.clear_cache() # Clear cache to reload secrets
-
def get_default_settings() -> Settings:
return Settings(
version=_get_version(),
- chat_model_provider="openrouter",
- chat_model_name="openai/gpt-4.1",
- chat_model_api_base="",
- chat_model_kwargs={"temperature": "0"},
- chat_model_ctx_length=100000,
- chat_model_ctx_history=0.7,
- chat_model_vision=True,
- chat_model_rl_requests=0,
- chat_model_rl_input=0,
- chat_model_rl_output=0,
- util_model_provider="openrouter",
- util_model_name="openai/gpt-4.1-mini",
- util_model_api_base="",
- util_model_ctx_length=100000,
- util_model_ctx_input=0.7,
- util_model_kwargs={"temperature": "0"},
- util_model_rl_requests=0,
- util_model_rl_input=0,
- util_model_rl_output=0,
- embed_model_provider="huggingface",
- embed_model_name="sentence-transformers/all-MiniLM-L6-v2",
- embed_model_api_base="",
- embed_model_kwargs={},
- embed_model_rl_requests=0,
- embed_model_rl_input=0,
- browser_model_provider="openrouter",
- browser_model_name="openai/gpt-4.1",
- browser_model_api_base="",
- browser_model_vision=True,
- browser_model_rl_requests=0,
- browser_model_rl_input=0,
- browser_model_rl_output=0,
- browser_model_kwargs={"temperature": "0"},
- browser_http_headers={},
- memory_recall_enabled=True,
- memory_recall_delayed=False,
- memory_recall_interval=3,
- memory_recall_history_len=10000,
- memory_recall_memories_max_search=12,
- memory_recall_solutions_max_search=8,
- memory_recall_memories_max_result=5,
- memory_recall_solutions_max_result=3,
- memory_recall_similarity_threshold=0.7,
- memory_recall_query_prep=True,
- memory_recall_post_filter=True,
- memory_memorize_enabled=True,
- memory_memorize_consolidation=True,
- memory_memorize_replace_threshold=0.9,
+ chat_model_provider=get_default_value("chat_model_provider", "openrouter"),
+ chat_model_name=get_default_value("chat_model_name", "openai/gpt-4.1"),
+ chat_model_api_base=get_default_value("chat_model_api_base", ""),
+ chat_model_kwargs=get_default_value("chat_model_kwargs", {"temperature": "0"}),
+ chat_model_ctx_length=get_default_value("chat_model_ctx_length", 100000),
+ chat_model_ctx_history=get_default_value("chat_model_ctx_history", 0.7),
+ chat_model_vision=get_default_value("chat_model_vision", True),
+ chat_model_rl_requests=get_default_value("chat_model_rl_requests", 0),
+ chat_model_rl_input=get_default_value("chat_model_rl_input", 0),
+ chat_model_rl_output=get_default_value("chat_model_rl_output", 0),
+ util_model_provider=get_default_value("util_model_provider", "openrouter"),
+ util_model_name=get_default_value("util_model_name", "openai/gpt-4.1-mini"),
+ util_model_api_base=get_default_value("util_model_api_base", ""),
+ util_model_ctx_length=get_default_value("util_model_ctx_length", 100000),
+ util_model_ctx_input=get_default_value("util_model_ctx_input", 0.7),
+ util_model_kwargs=get_default_value("util_model_kwargs", {"temperature": "0"}),
+ util_model_rl_requests=get_default_value("util_model_rl_requests", 0),
+ util_model_rl_input=get_default_value("util_model_rl_input", 0),
+ util_model_rl_output=get_default_value("util_model_rl_output", 0),
+ embed_model_provider=get_default_value("embed_model_provider", "huggingface"),
+ embed_model_name=get_default_value(
+ "embed_model_name", "sentence-transformers/all-MiniLM-L6-v2"
+ ),
+ embed_model_api_base=get_default_value("embed_model_api_base", ""),
+ embed_model_kwargs=get_default_value("embed_model_kwargs", {}),
+ embed_model_rl_requests=get_default_value("embed_model_rl_requests", 0),
+ embed_model_rl_input=get_default_value("embed_model_rl_input", 0),
+ browser_model_provider=get_default_value(
+ "browser_model_provider", "openrouter"
+ ),
+ browser_model_name=get_default_value("browser_model_name", "openai/gpt-4.1"),
+ browser_model_api_base=get_default_value("browser_model_api_base", ""),
+ browser_model_vision=get_default_value("browser_model_vision", True),
+ browser_model_rl_requests=get_default_value("browser_model_rl_requests", 0),
+ browser_model_rl_input=get_default_value("browser_model_rl_input", 0),
+ browser_model_rl_output=get_default_value("browser_model_rl_output", 0),
+ browser_model_kwargs=get_default_value(
+ "browser_model_kwargs", {"temperature": "0"}
+ ),
+ browser_http_headers=get_default_value("browser_http_headers", {}),
+ memory_recall_enabled=get_default_value("memory_recall_enabled", True),
+ memory_recall_delayed=get_default_value("memory_recall_delayed", False),
+ memory_recall_interval=get_default_value("memory_recall_interval", 3),
+ memory_recall_history_len=get_default_value("memory_recall_history_len", 10000),
+ memory_recall_memories_max_search=get_default_value(
+ "memory_recall_memories_max_search", 12
+ ),
+ memory_recall_solutions_max_search=get_default_value(
+ "memory_recall_solutions_max_search", 8
+ ),
+ memory_recall_memories_max_result=get_default_value(
+ "memory_recall_memories_max_result", 5
+ ),
+ memory_recall_solutions_max_result=get_default_value(
+ "memory_recall_solutions_max_result", 3
+ ),
+ memory_recall_similarity_threshold=get_default_value(
+ "memory_recall_similarity_threshold", 0.7
+ ),
+ memory_recall_query_prep=get_default_value("memory_recall_query_prep", True),
+ memory_recall_post_filter=get_default_value("memory_recall_post_filter", True),
+ memory_memorize_enabled=get_default_value("memory_memorize_enabled", True),
+ memory_memorize_consolidation=get_default_value(
+ "memory_memorize_consolidation", True
+ ),
+ memory_memorize_replace_threshold=get_default_value(
+ "memory_memorize_replace_threshold", 0.9
+ ),
api_keys={},
auth_login="",
auth_password="",
root_password="",
- agent_profile="agent0",
- agent_memory_subdir="default",
- agent_knowledge_subdir="custom",
- rfc_auto_docker=True,
- rfc_url="localhost",
+ agent_profile=get_default_value("agent_profile", "agent0"),
+ agent_memory_subdir=get_default_value("agent_memory_subdir", "default"),
+ agent_knowledge_subdir=get_default_value("agent_knowledge_subdir", "custom"),
+ rfc_auto_docker=get_default_value("rfc_auto_docker", True),
+ rfc_url=get_default_value("rfc_url", "localhost"),
rfc_password="",
- rfc_port_http=55080,
- rfc_port_ssh=55022,
- shell_interface="local" if runtime.is_dockerized() else "ssh",
- stt_model_size="base",
- stt_language="en",
- stt_silence_threshold=0.3,
- stt_silence_duration=1000,
- stt_waiting_timeout=2000,
- tts_kokoro=True,
- mcp_servers='{\n "mcpServers": {}\n}',
- mcp_client_init_timeout=10,
- mcp_client_tool_timeout=120,
- mcp_server_enabled=False,
+ rfc_port_http=get_default_value("rfc_port_http", 55080),
+ rfc_port_ssh=get_default_value("rfc_port_ssh", 55022),
+ shell_interface=get_default_value(
+ "shell_interface", "local" if runtime.is_dockerized() else "ssh"
+ ),
+ stt_model_size=get_default_value("stt_model_size", "base"),
+ stt_language=get_default_value("stt_language", "en"),
+ stt_silence_threshold=get_default_value("stt_silence_threshold", 0.3),
+ stt_silence_duration=get_default_value("stt_silence_duration", 1000),
+ stt_waiting_timeout=get_default_value("stt_waiting_timeout", 2000),
+ tts_kokoro=get_default_value("tts_kokoro", True),
+ mcp_servers=get_default_value("mcp_servers", '{\n "mcpServers": {}\n}'),
+ mcp_client_init_timeout=get_default_value("mcp_client_init_timeout", 10),
+ mcp_client_tool_timeout=get_default_value("mcp_client_tool_timeout", 120),
+ mcp_server_enabled=get_default_value("mcp_server_enabled", False),
mcp_server_token=create_auth_token(),
- a2a_server_enabled=False,
+ a2a_server_enabled=get_default_value("a2a_server_enabled", False),
variables="",
secrets="",
- litellm_global_kwargs={},
+ litellm_global_kwargs=get_default_value("litellm_global_kwargs", {}),
+ update_check_enabled=get_default_value("update_check_enabled", True),
)
@@ -1586,9 +681,7 @@ async def update_mcp_settings(mcp_servers: str):
) # TODO overkill, replace with background task
# update token in mcp server
- current_token = (
- create_auth_token()
- ) # TODO - ugly, token in settings is generated from dotenv and does not always correspond
+ current_token = create_auth_token() # TODO - ugly, token in settings is generated from dotenv and does not always correspond
if not previous or current_token != previous["mcp_server_token"]:
async def update_mcp_token(token: str):
@@ -1617,16 +710,16 @@ def _env_to_dict(data: str):
result = {}
for line in data.splitlines():
line = line.strip()
- if not line or line.startswith('#'):
+ if not line or line.startswith("#"):
continue
-
- if '=' not in line:
+
+ if "=" not in line:
continue
-
- key, value = line.split('=', 1)
+
+ key, value = line.split("=", 1)
key = key.strip()
value = value.strip()
-
+
# If quoted, treat as string
if value.startswith('"') and value.endswith('"'):
result[key] = value[1:-1].replace('\\"', '"') # Unescape quotes
@@ -1638,7 +731,7 @@ def _env_to_dict(data: str):
result[key] = json.loads(value)
except (json.JSONDecodeError, ValueError):
result[key] = value
-
+
return result
@@ -1651,11 +744,11 @@ def _dict_to_env(data_dict):
lines.append(f'{key}="{escaped_value}"')
elif isinstance(value, (dict, list, bool)) or value is None:
# Serialize as unquoted JSON
- lines.append(f'{key}={json.dumps(value, separators=(",", ":"))}')
+ lines.append(f"{key}={json.dumps(value, separators=(',', ':'))}")
else:
# Numbers and other types as unquoted strings
- lines.append(f'{key}={value}')
-
+ lines.append(f"{key}={value}")
+
return "\n".join(lines)
@@ -1707,8 +800,4 @@ def create_auth_token() -> str:
def _get_version():
- try:
- git_info = git.get_git_info()
- return str(git_info.get("short_tag", "")).strip() or "unknown"
- except Exception:
- return "unknown"
+ return git.get_version()
diff --git a/python/helpers/shell_local.py b/python/helpers/shell_local.py
index cc0815f104..dcc14398d8 100644
--- a/python/helpers/shell_local.py
+++ b/python/helpers/shell_local.py
@@ -1,18 +1,20 @@
+import platform
import select
import subprocess
import time
import sys
from typing import Optional, Tuple
-from python.helpers import tty_session
+from python.helpers import tty_session, runtime
from python.helpers.shell_ssh import clean_string
class LocalInteractiveSession:
- def __init__(self):
+ def __init__(self, cwd: str|None = None):
self.session: tty_session.TTYSession|None = None
self.full_output = ''
+ self.cwd = cwd
async def connect(self):
- self.session = tty_session.TTYSession("/bin/bash")
+ self.session = tty_session.TTYSession(runtime.get_terminal_executable(), cwd=self.cwd)
await self.session.start()
await self.session.read_full_until_idle(idle_timeout=1, total_timeout=1)
diff --git a/python/helpers/shell_ssh.py b/python/helpers/shell_ssh.py
index 3d368eb402..bafcdbea08 100644
--- a/python/helpers/shell_ssh.py
+++ b/python/helpers/shell_ssh.py
@@ -14,7 +14,7 @@ class SSHInteractiveSession:
# ps1_label = "SSHInteractiveSession CLI>"
def __init__(
- self, logger: Log, hostname: str, port: int, username: str, password: str
+ self, logger: Log, hostname: str, port: int, username: str, password: str, cwd: str|None = None
):
self.logger = logger
self.hostname = hostname
@@ -27,6 +27,7 @@ def __init__(
self.full_output = b""
self.last_command = b""
self.trimmed_command_length = 0 # Initialize trimmed_command_length
+ self.cwd = cwd
async def connect(self, keepalive_interval: int = 5):
"""
@@ -60,7 +61,12 @@ async def connect(self, keepalive_interval: int = 5):
# invoke interactive shell
self.shell = self.client.invoke_shell(width=100, height=50)
- self.shell.send("stty -echo\n".encode()) # disable local echo
+
+ # disable systemd/OSC prompt metadata and disable local echo
+ initial_command = "unset PROMPT_COMMAND PS0; stty -echo"
+ if self.cwd:
+ initial_command = f"cd {self.cwd}; {initial_command}"
+ self.shell.send(f"{initial_command}\n".encode())
# wait for initial prompt/output to settle
while True:
@@ -99,7 +105,7 @@ async def send_command(self, command: str):
self.last_command = command.encode()
self.trimmed_command_length = 0
self.shell.send(self.last_command)
-
+
async def read_output(
self, timeout: float = 0, reset_full_output: bool = False
) -> Tuple[str, str]:
diff --git a/python/helpers/strings.py b/python/helpers/strings.py
index 1144b1f3b9..dac89c63e8 100644
--- a/python/helpers/strings.py
+++ b/python/helpers/strings.py
@@ -168,6 +168,7 @@ def _repl(match):
path = match.group(1)
try:
# read file content
+ path = files.fix_dev_path(path)
return files.read_file(path)
except Exception:
# if file not readable keep original placeholder
diff --git a/python/helpers/subagents.py b/python/helpers/subagents.py
new file mode 100644
index 0000000000..66f320f001
--- /dev/null
+++ b/python/helpers/subagents.py
@@ -0,0 +1,308 @@
+from python.helpers import files
+from typing import TypedDict, TYPE_CHECKING
+from pydantic import BaseModel, model_validator
+import json
+from typing import Literal
+
+GLOBAL_DIR = "."
+USER_DIR = "usr"
+DEFAULT_AGENTS_DIR = "agents"
+USER_AGENTS_DIR = "usr/agents"
+
+type Origin = Literal["default", "user", "project"]
+
+if TYPE_CHECKING:
+ from agent import Agent
+
+
+class SubAgentListItem(BaseModel):
+ name: str = ""
+ title: str = ""
+ description: str = ""
+ context: str = ""
+ origin: list[Origin] = []
+ enabled: bool = True
+
+ @model_validator(mode="after")
+ def post_validator(self):
+ if self.title == "":
+ self.title = self.name
+ return self
+
+
+class SubAgent(SubAgentListItem):
+ prompts: dict[str, str] = {}
+
+
+def get_agents_list(project_name: str | None = None) -> list[SubAgentListItem]:
+ return list(get_agents_dict(project_name).values())
+
+
+def get_agents_dict(
+ project_name: str | None = None,
+) -> dict[str, SubAgentListItem]:
+ def _merge_agent_dicts(
+ base: dict[str, SubAgentListItem],
+ overrides: dict[str, SubAgentListItem],
+ ) -> dict[str, SubAgentListItem]:
+ merged: dict[str, SubAgentListItem] = dict(base)
+ for name, override in overrides.items():
+ base_agent = merged.get(name)
+ merged[name] = (
+ _merge_agent_list_items(base_agent, override)
+ if base_agent
+ else override
+ )
+ return merged
+
+ # load default and custom agents and merge
+ default_agents = _get_agents_list_from_dir(DEFAULT_AGENTS_DIR, origin="default")
+ custom_agents = _get_agents_list_from_dir(USER_AGENTS_DIR, origin="user")
+ merged = _merge_agent_dicts(default_agents, custom_agents)
+
+ # merge with project agents if possible
+ if project_name:
+ from python.helpers import projects
+
+ project_agents_dir = projects.get_project_meta_folder(project_name, "agents")
+ project_agents = _get_agents_list_from_dir(project_agents_dir, origin="project")
+ merged = _merge_agent_dicts(merged, project_agents)
+
+ return merged
+
+
+def _get_agents_list_from_dir(dir: str, origin: Origin) -> dict[str, SubAgentListItem]:
+ result: dict[str, SubAgentListItem] = {}
+ subdirs = files.get_subdirectories(dir)
+
+ for subdir in subdirs:
+ try:
+ agent_json = files.read_file(files.get_abs_path(dir, subdir, "agent.json"))
+ agent_data = SubAgentListItem.model_validate_json(agent_json)
+ name = agent_data.name or subdir
+ agent_data.name = name
+ agent_data.origin = [origin]
+ result[name] = agent_data
+ except Exception:
+ continue
+
+ return result
+
+
+def load_agent_data(name: str, project_name: str | None = None) -> SubAgent:
+ def _merge_agent(
+ original: SubAgent | None, override: SubAgent | None = None
+ ) -> SubAgent | None:
+ if original and override:
+ return _merge_agents(original, override)
+ elif original:
+ return original
+ return override
+
+ # load default and user agents and merge
+ default_agent = _load_agent_data_from_dir(
+ DEFAULT_AGENTS_DIR, name, origin="default"
+ )
+ user_agent = _load_agent_data_from_dir(USER_AGENTS_DIR, name, origin="user")
+ merged = _merge_agent(default_agent, user_agent)
+
+ # merge with project agent if possible
+ if project_name:
+ from python.helpers import projects
+
+ project_agents_dir = projects.get_project_meta_folder(project_name, "agents")
+ project_agent = _load_agent_data_from_dir(
+ project_agents_dir, name, origin="project"
+ )
+ merged = _merge_agent(merged, project_agent)
+
+ if merged is None:
+ raise FileNotFoundError(
+ f"Agent '{name}' not found in default or custom directories"
+ )
+
+ return merged
+
+
+def save_agent_data(name: str, subagent: SubAgent) -> None:
+ # write agent.json in custom directory
+ agent_dir = f"{USER_AGENTS_DIR}/{name}"
+ agent_json = {
+ "title": subagent.title,
+ "description": subagent.description,
+ "context": subagent.context,
+ "enabled": subagent.enabled,
+ }
+ files.write_file(f"{agent_dir}/agent.json", json.dumps(agent_json, indent=2))
+
+ # replace prompts in custom directory
+ prompts_dir = f"{agent_dir}/prompts"
+ # clear existing custom prompts directory (if any)
+ files.delete_dir(prompts_dir)
+
+ prompts = subagent.prompts or {}
+ for name, content in prompts.items():
+ safe_name = files.safe_file_name(name)
+ if not safe_name.endswith(".md"):
+ safe_name += ".md"
+ files.write_file(f"{prompts_dir}/{safe_name}", content)
+
+
+def delete_agent_data(name: str) -> None:
+ files.delete_dir(f"{USER_AGENTS_DIR}/{name}")
+
+
+def _load_agent_data_from_dir(dir: str, name: str, origin: Origin) -> SubAgent | None:
+ try:
+ subagent_json = files.read_file(files.get_abs_path(dir, name, "agent.json"))
+ subagent = SubAgent.model_validate_json(subagent_json)
+ except Exception:
+ # backward compatibility (before agent.json existed)
+ try:
+ context_file = files.read_file(files.get_abs_path(dir, name, "_context.md"))
+ except Exception:
+ context_file = ""
+ subagent = SubAgent(
+ name=name,
+ title=name,
+ description="",
+ context=context_file,
+ origin=[origin],
+ prompts={},
+ )
+
+    # fields derived from the call site, not persisted in agent.json
+ subagent.name = name
+ subagent.origin = [origin]
+
+ prompts_dir = f"{dir}/{name}/prompts"
+ try:
+ prompts = files.read_text_files_in_dir(prompts_dir, pattern="*.md")
+ except Exception:
+ prompts = {}
+
+ subagent.prompts = prompts or {}
+ return subagent
+
+
+def _merge_agents(base: SubAgent | None, override: SubAgent | None) -> SubAgent | None:
+ if base is None:
+ return override
+ if override is None:
+ return base
+
+ merged_prompts: dict[str, str] = {}
+ merged_prompts.update(base.prompts or {})
+ merged_prompts.update(override.prompts or {})
+
+ return SubAgent(
+ name=override.name,
+ title=override.title,
+ description=override.description,
+ context=override.context,
+ origin=_merge_origins(base.origin, override.origin),
+ prompts=merged_prompts,
+ )
+
+
+def _merge_agent_list_items(
+ base: SubAgentListItem, override: SubAgentListItem
+) -> SubAgentListItem:
+ return SubAgentListItem(
+ name=override.name or base.name,
+ title=override.title or base.title,
+ description=override.description or base.description,
+ context=override.context or base.context,
+ origin=_merge_origins(base.origin, override.origin),
+ )
+
+
+def _merge_origins(base: list[Origin], override: list[Origin]) -> list[Origin]:
+ return base + override
+
+
+def get_default_promp_file_names() -> list[str]:
+ return files.list_files("prompts", filter="*.md")
+
+
+def get_available_agents_dict(
+ project_name: str | None,
+) -> dict[str, SubAgentListItem]:
+ # all available agents
+ all_agents = get_agents_dict()
+ # filter by project settings
+ from python.helpers import projects
+
+ project_settings = (
+ projects.load_project_subagents(project_name) if project_name else {}
+ )
+
+ filtered_agents: dict[str, SubAgentListItem] = {}
+ for name, agent in all_agents.items():
+ if name in project_settings:
+ agent.enabled = project_settings[name]["enabled"]
+ if agent.enabled:
+ filtered_agents[name] = agent
+ return filtered_agents
+
+
+def get_paths(
+ agent: "Agent|None",
+ *subpaths,
+ must_exist_completely: bool = True,
+ include_project: bool = True,
+ include_user: bool = True,
+ include_default: bool = True,
+ default_root: str = "",
+) -> list[str]:
+ """Returns list of file paths for the given agent and subpaths, searched in order of priority:
+ project/agents/, project/, usr/agents/, agents/, usr/, default."""
+ paths: list[str] = []
+ check_subpaths = subpaths if must_exist_completely else []
+ profile_name = agent.config.profile if agent and agent.config.profile else ""
+ project_name = ""
+
+ if include_project and agent:
+ from python.helpers import projects
+
+ project_name = projects.get_context_project_name(agent.context) or ""
+
+ if project_name and profile_name:
+ # project/agents/&lt;profile&gt;/...
+ project_agent_dir = projects.get_project_meta_folder(
+ project_name, "agents", profile_name
+ )
+ if files.exists(files.get_abs_path(project_agent_dir, *check_subpaths)):
+ paths.append(files.get_abs_path(project_agent_dir, *subpaths))
+
+ if project_name:
+ # project/.a0proj/...
+ path = projects.get_project_meta_folder(project_name, *subpaths)
+ if (not must_exist_completely) or files.exists(path):
+ paths.append(path)
+
+ if profile_name:
+
+ # usr/agents/&lt;profile&gt;/...
+ path = files.get_abs_path(USER_AGENTS_DIR, profile_name, *subpaths)
+ if (not must_exist_completely) or files.exists(files.get_abs_path(USER_AGENTS_DIR, profile_name, *check_subpaths)):
+ paths.append(path)
+
+ # agents/&lt;profile&gt;/...
+ path = files.get_abs_path(DEFAULT_AGENTS_DIR, profile_name, *subpaths)
+ if (not must_exist_completely) or files.exists(files.get_abs_path(DEFAULT_AGENTS_DIR, profile_name, *check_subpaths)):
+ paths.append(path)
+
+ if include_user:
+ # usr/...
+ path = files.get_abs_path(USER_DIR, *subpaths)
+ if (not must_exist_completely) or files.exists(path):
+ paths.append(path)
+
+ if include_default:
+ # default_root/...
+ path = files.get_abs_path(default_root, *subpaths)
+ if (not must_exist_completely) or files.exists(path):
+ paths.append(path)
+
+ return paths
diff --git a/python/helpers/task_scheduler.py b/python/helpers/task_scheduler.py
index 9bfabcece6..1938db367e 100644
--- a/python/helpers/task_scheduler.py
+++ b/python/helpers/task_scheduler.py
@@ -22,6 +22,7 @@
from python.helpers.defer import DeferredTask
from python.helpers.files import get_abs_path, make_dirs, read_file, write_file
from python.helpers.localization import Localization
+from python.helpers import projects, guids
import pytz
from typing import Annotated
@@ -117,13 +118,15 @@ def should_launch(self) -> datetime | None:
class BaseTask(BaseModel):
- uuid: str = Field(default_factory=lambda: str(uuid.uuid4()))
+ uuid: str = Field(default_factory=lambda: guids.generate_id())
context_id: Optional[str] = Field(default=None)
state: TaskState = Field(default=TaskState.IDLE)
name: str = Field()
system_prompt: str
prompt: str
attachments: list[str] = Field(default_factory=list)
+ project_name: str | None = Field(default=None)
+ project_color: str | None = Field(default=None)
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
last_run: datetime | None = None
@@ -181,6 +184,9 @@ def check_schedule(self, frequency_seconds: float = 60.0) -> bool:
def get_next_run(self) -> datetime | None:
return None
+ def is_dedicated(self) -> bool:
+ return self.context_id == self.uuid
+
def get_next_run_minutes(self) -> int | None:
next_run = self.get_next_run()
if next_run is None:
@@ -209,7 +215,7 @@ async def on_error(self, error: str):
last_result=f"ERROR: {error}"
)
if not updated_task:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
f"Failed to update task {self.uuid} state to ERROR after error: {error}"
)
await scheduler.save() # Force save after update
@@ -225,7 +231,7 @@ async def on_success(self, result: str):
last_result=result
)
if not updated_task:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
f"Failed to update task {self.uuid} state to IDLE after success"
)
await scheduler.save() # Force save after update
@@ -243,14 +249,18 @@ def create(
prompt: str,
token: str,
attachments: list[str] = list(),
- context_id: str | None = None
+ context_id: str | None = None,
+ project_name: str | None = None,
+ project_color: str | None = None
):
return cls(name=name,
system_prompt=system_prompt,
prompt=prompt,
attachments=attachments,
token=token,
- context_id=context_id)
+ context_id=context_id,
+ project_name=project_name,
+ project_color=project_color)
def update(self,
name: str | None = None,
@@ -288,7 +298,9 @@ def create(
schedule: TaskSchedule,
attachments: list[str] = list(),
context_id: str | None = None,
- timezone: str | None = None
+ timezone: str | None = None,
+ project_name: str | None = None,
+ project_color: str | None = None,
):
# Set timezone in schedule if provided
if timezone is not None:
@@ -301,7 +313,9 @@ def create(
prompt=prompt,
attachments=attachments,
schedule=schedule,
- context_id=context_id)
+ context_id=context_id,
+ project_name=project_name,
+ project_color=project_color)
def update(self,
name: str | None = None,
@@ -365,14 +379,18 @@ def create(
prompt: str,
plan: TaskPlan,
attachments: list[str] = list(),
- context_id: str | None = None
+ context_id: str | None = None,
+ project_name: str | None = None,
+ project_color: str | None = None
):
return cls(name=name,
system_prompt=system_prompt,
prompt=prompt,
plan=plan,
attachments=attachments,
- context_id=context_id)
+ context_id=context_id,
+ project_name=project_name,
+ project_color=project_color)
def update(self,
name: str | None = None,
@@ -486,12 +504,12 @@ async def save(self) -> "SchedulerTaskList":
for task in self.tasks:
if isinstance(task, AdHocTask):
if task.token is None or task.token == "":
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.warning(
f"WARNING: AdHocTask {task.name} ({task.uuid}) has a null or empty token before saving: '{task.token}'"
)
# Generate a new token to prevent errors
task.token = str(random.randint(1000000000000000000, 9999999999999999999))
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.info(
f"Fixed: Generated new token '{task.token}' for task {task.name}"
)
@@ -504,7 +522,7 @@ async def save(self) -> "SchedulerTaskList":
# Debug: check if 'null' appears as token value in JSON
if '"type": "adhoc"' in json_data and '"token": null' in json_data:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
"ERROR: Found null token in JSON output for an adhoc task"
)
@@ -514,7 +532,7 @@ async def save(self) -> "SchedulerTaskList":
if exists(path):
loaded_json = read_file(path)
if '"type": "adhoc"' in loaded_json and '"token": null' in loaded_json:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
"ERROR: Null token persisted in JSON file for an adhoc task"
)
@@ -601,6 +619,8 @@ class TaskScheduler:
_tasks: SchedulerTaskList
_printer: PrintStyle
_instance = None
+ _running_deferred_tasks: Dict[str, DeferredTask]
+ _running_tasks_lock: threading.RLock
@classmethod
def get(cls) -> "TaskScheduler":
@@ -613,8 +633,38 @@ def __init__(self):
if not hasattr(self, '_initialized'):
self._tasks = SchedulerTaskList.get()
self._printer = PrintStyle(italic=True, font_color="green", padding=False)
+ self._running_deferred_tasks = {}
+ self._running_tasks_lock = threading.RLock()
self._initialized = True
+ def _register_running_task(self, task_uuid: str, deferred_task: DeferredTask) -> None:
+ with self._running_tasks_lock:
+ self._running_deferred_tasks[task_uuid] = deferred_task
+
+ def _unregister_running_task(self, task_uuid: str) -> None:
+ with self._running_tasks_lock:
+ self._running_deferred_tasks.pop(task_uuid, None)
+
+ def cancel_running_task(self, task_uuid: str, terminate_thread: bool = False) -> bool:
+ with self._running_tasks_lock:
+ deferred_task = self._running_deferred_tasks.get(task_uuid)
+ if not deferred_task:
+ return False
+ PrintStyle.info(f"Scheduler cancelling task {task_uuid}")
+ deferred_task.kill(terminate_thread=terminate_thread)
+ return True
+
+ def cancel_tasks_by_context(self, context_id: str, terminate_thread: bool = False) -> bool:
+ cancelled_any = False
+ with self._running_tasks_lock:
+ running_tasks = list(self._running_deferred_tasks.keys())
+ for task_uuid in running_tasks:
+ task = self.get_task_by_uuid(task_uuid)
+ if task and task.context_id == context_id:
+ if self.cancel_running_task(task_uuid, terminate_thread=terminate_thread):
+ cancelled_any = True
+ return cancelled_any
+
async def reload(self):
await self._tasks.reload()
@@ -669,7 +719,7 @@ async def run_task_by_uuid(self, task_uuid: str, task_context: str | None = None
# If the task is in error state, reset it to IDLE first
if task.state == TaskState.ERROR:
- self._printer.print(f"Resetting task '{task.name}' from ERROR to IDLE state before running")
+ PrintStyle.info(f"Resetting task '{task.name}' from ERROR to IDLE state before running")
await self.update_task(task_uuid, state=TaskState.IDLE)
# Force a reload to ensure we have the updated state
await self._tasks.reload()
@@ -719,6 +769,10 @@ async def __new_context(self, task: Union[ScheduledTask, AdHocTask, PlannedTask]
# initial name before renaming is same as task name
# context.name = task.name
+ # Activate project if set
+ if task.project_name:
+ projects.activate_project(context.id, task.project_name)
+
# Save the context
save_tmp_chat(context)
return context
@@ -728,13 +782,13 @@ async def _get_chat_context(self, task: Union[ScheduledTask, AdHocTask, PlannedT
if context:
assert isinstance(context, AgentContext)
- self._printer.print(
+ PrintStyle.info(
f"Scheduler Task {task.name} loaded from task {task.uuid}, context ok"
)
save_tmp_chat(context)
return context
else:
- self._printer.print(
+ PrintStyle.warning(
f"Scheduler Task {task.name} loaded from task {task.uuid} but context not found"
)
return await self.__new_context(task)
@@ -751,20 +805,24 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
# preflight checks with a snapshot of the task
task_snapshot: Union[ScheduledTask, AdHocTask, PlannedTask] | None = self.get_task_by_uuid(task_uuid)
if task_snapshot is None:
- self._printer.print(f"Scheduler Task with UUID '{task_uuid}' not found")
+ PrintStyle.error(f"Scheduler Task with UUID '{task_uuid}' not found")
+ self._unregister_running_task(task_uuid)
return
if task_snapshot.state == TaskState.RUNNING:
- self._printer.print(f"Scheduler Task '{task_snapshot.name}' already running, skipping")
+ PrintStyle.warning(f"Scheduler Task '{task_snapshot.name}' already running, skipping")
+ self._unregister_running_task(task_uuid)
return
# Atomically fetch and check the task's current state
current_task = await self.update_task_checked(task_uuid, lambda task: task.state != TaskState.RUNNING, state=TaskState.RUNNING)
if not current_task:
- self._printer.print(f"Scheduler Task with UUID '{task_uuid}' not found or updated by another process")
+ PrintStyle.error(f"Scheduler Task with UUID '{task_uuid}' not found or updated by another process")
+ self._unregister_running_task(task_uuid)
return
if current_task.state != TaskState.RUNNING:
# This means the update failed due to state conflict
- self._printer.print(f"Scheduler Task '{current_task.name}' state is '{current_task.state}', skipping")
+ PrintStyle.warning(f"Scheduler Task '{current_task.name}' state is '{current_task.state}', skipping")
+ self._unregister_running_task(task_uuid)
return
await current_task.on_run()
@@ -773,9 +831,10 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
agent = None
try:
- self._printer.print(f"Scheduler Task '{current_task.name}' started")
+ PrintStyle.info(f"Scheduler Task '{current_task.name}' started")
context = await self._get_chat_context(current_task)
+ AgentContext.use(context.id)
# Ensure the context is properly registered in the AgentContext._contexts
# This is critical for the polling mechanism to find and stream logs
@@ -795,9 +854,9 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
if url.scheme in ["http", "https", "ftp", "ftps", "sftp"]:
attachment_filenames.append(attachment)
else:
- self._printer.print(f"Skipping attachment: [{attachment}]")
+ PrintStyle.warning(f"Skipping attachment: [{attachment}]")
except Exception:
- self._printer.print(f"Skipping attachment: [{attachment}]")
+ PrintStyle.warning(f"Skipping attachment: [{attachment}]")
self._printer.print("User message:")
self._printer.print(f"> {current_task.prompt}")
@@ -834,7 +893,7 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
result = await agent.monologue()
# Success
- self._printer.print(f"Scheduler Task '{current_task.name}' completed: {result}")
+ PrintStyle.success(f"Scheduler Task '{current_task.name}' completed: {result}")
await self._persist_chat(current_task, context)
await current_task.on_success(result)
@@ -842,36 +901,57 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
await self._tasks.reload()
updated_task = self.get_task_by_uuid(task_uuid)
if updated_task and updated_task.state != TaskState.IDLE:
- self._printer.print(f"Fixing task state consistency: '{current_task.name}' state is not IDLE after success")
+ PrintStyle.warning(f"Fixing task state consistency: '{current_task.name}' state is not IDLE after success")
await self.update_task(task_uuid, state=TaskState.IDLE)
+ except asyncio.CancelledError:
+ PrintStyle.warning(f"Scheduler Task '{current_task.name}' cancelled by user")
+ try:
+ await asyncio.shield(self.update_task(task_uuid, state=TaskState.IDLE))
+ except Exception:
+ pass
+ raise
except Exception as e:
# Error
- self._printer.print(f"Scheduler Task '{current_task.name}' failed: {e}")
+ PrintStyle.error(f"Scheduler Task '{current_task.name}' failed: {e}")
await current_task.on_error(str(e))
# Explicitly verify task was updated in storage after error
await self._tasks.reload()
updated_task = self.get_task_by_uuid(task_uuid)
if updated_task and updated_task.state != TaskState.ERROR:
- self._printer.print(f"Fixing task state consistency: '{current_task.name}' state is not ERROR after failure")
+ PrintStyle.warning(f"Fixing task state consistency: '{current_task.name}' state is not ERROR after failure")
await self.update_task(task_uuid, state=TaskState.ERROR)
if agent:
agent.handle_critical_exception(e)
finally:
# Call on_finish for task-specific cleanup
- await current_task.on_finish()
+ try:
+ await asyncio.shield(current_task.on_finish())
+ except asyncio.CancelledError:
+ pass
+ except Exception:
+ pass
# Make one final save to ensure all states are persisted
- await self._tasks.save()
+ try:
+ await asyncio.shield(self._tasks.save())
+ except asyncio.CancelledError:
+ pass
+ except Exception:
+ pass
+
+ self._unregister_running_task(task_uuid)
deferred_task = DeferredTask(thread_name=self.__class__.__name__)
+ self._register_running_task(task.uuid, deferred_task)
deferred_task.start_task(_run_task_wrapper, task.uuid, task_context)
- # Ensure background execution doesn't exit immediately on async await, especially in script contexts
- # This helps prevent premature exits when running from non-event-loop contexts
- asyncio.create_task(asyncio.sleep(0.1))
+ # Ensure background execution doesn't exit immediately on async await, especially in script contexts.
+ # Yielding briefly keeps callers like CLI scripts alive long enough for the DeferredTask thread to spin up
+ # without leaving stray pending tasks that trigger \"Task was destroyed\" warnings when the loop shuts down.
+ await asyncio.sleep(0.1)
def serialize_all_tasks(self) -> list[Dict[str, Any]]:
"""
@@ -1014,7 +1094,7 @@ def parse_task_plan(plan_data: Dict[str, Any]) -> TaskPlan:
done=done_dates_cast
)
except Exception as e:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
f"Error parsing task plan: {e}"
)
# Return empty plan instead of failing
@@ -1036,12 +1116,19 @@ def serialize_task(task: Union[ScheduledTask, AdHocTask, PlannedTask]) -> Dict[s
"system_prompt": task.system_prompt,
"prompt": task.prompt,
"attachments": task.attachments,
+ "project_name": task.project_name,
+ "project_color": task.project_color,
"created_at": serialize_datetime(task.created_at),
"updated_at": serialize_datetime(task.updated_at),
"last_run": serialize_datetime(task.last_run),
"next_run": serialize_datetime(task.get_next_run()),
"last_result": task.last_result,
- "context_id": task.context_id
+ "context_id": task.context_id,
+ "dedicated_context": task.is_dedicated(),
+ "project": {
+ "name": task.project_name,
+ "color": task.project_color,
+ },
}
# Add type-specific fields
@@ -1101,11 +1188,13 @@ def deserialize_task(task_data: Dict[str, Any], task_class: Optional[Type[T]] =
"system_prompt": task_data.get("system_prompt", ""),
"prompt": task_data.get("prompt", ""),
"attachments": task_data.get("attachments", []),
+ "project_name": task_data.get("project_name"),
+ "project_color": task_data.get("project_color"),
"created_at": parse_datetime(task_data.get("created_at")),
"updated_at": parse_datetime(task_data.get("updated_at")),
"last_run": parse_datetime(task_data.get("last_run")),
"last_result": task_data.get("last_result"),
- "context_id": task_data.get("context_id")
+ "context_id": task_data.get("context_id"),
}
# Add type-specific fields
diff --git a/python/helpers/tokens.py b/python/helpers/tokens.py
index 622c6d5be1..42224a495c 100644
--- a/python/helpers/tokens.py
+++ b/python/helpers/tokens.py
@@ -13,7 +13,7 @@ def count_tokens(text: str, encoding_name="cl100k_base") -> int:
encoding = tiktoken.get_encoding(encoding_name)
# Encode the text and count the tokens
- tokens = encoding.encode(text)
+ tokens = encoding.encode(text, disallowed_special=())
token_count = len(tokens)
return token_count
diff --git a/python/helpers/tool.py b/python/helpers/tool.py
index 5825958efd..2613f1c5d9 100644
--- a/python/helpers/tool.py
+++ b/python/helpers/tool.py
@@ -22,11 +22,20 @@ def __init__(self, agent: Agent, name: str, method: str | None, args: dict[str,s
self.args = args
self.loop_data = loop_data
self.message = message
+ self.progress: str = ""
@abstractmethod
async def execute(self,**kwargs) -> Response:
pass
+ def set_progress(self, content: str | None):
+ self.progress = content or ""
+
+ def add_progress(self, content: str | None):
+ if not content:
+ return
+ self.progress += content
+
async def before_execution(self, **kwargs):
PrintStyle(font_color="#1B4F72", padding=True, background_color="white", bold=True).print(f"{self.agent.agent_name}: Using tool '{self.name}'")
self.log = self.get_log_object()
diff --git a/python/helpers/tty_session.py b/python/helpers/tty_session.py
index 68b546f67e..d6a3f91a60 100644
--- a/python/helpers/tty_session.py
+++ b/python/helpers/tty_session.py
@@ -204,53 +204,66 @@ async def drain(self):
async def _spawn_winpty(cmd, cwd, env, echo):
- # A quick way to silence command echo in cmd.exe is /Q (quiet)
- if not echo and cmd.strip().lower().startswith("cmd") and "/q" not in cmd.lower():
- cmd = cmd.replace("cmd.exe", "cmd.exe /Q")
+ # Clean PowerShell startup: no logo, no profile, bypass execution policy for deterministic behavior
+ if cmd.strip().lower().startswith("powershell"):
+ if "-nolog" not in cmd.lower():
+ cmd = cmd.replace("powershell.exe", "powershell.exe -NoLogo -NoProfile -ExecutionPolicy Bypass", 1)
cols, rows = 80, 25
- pty = winpty.PTY(cols, rows) # type: ignore
- child = pty.spawn(cmd, cwd=cwd or os.getcwd(), env=env)
-
- master_r_fd = msvcrt.open_osfhandle(child.conout_pipe, os.O_RDONLY) # type: ignore
- master_w_fd = msvcrt.open_osfhandle(child.conin_pipe, 0) # type: ignore
+ child = winpty.PtyProcess.spawn(cmd, dimensions=(rows, cols), cwd=cwd or os.getcwd(), env=env) # type: ignore
loop = asyncio.get_running_loop()
reader = asyncio.StreamReader()
- def _on_data():
- try:
- data = os.read(master_r_fd, 1 << 16)
- except OSError:
- data = b""
- if data:
- reader.feed_data(data)
- else:
- reader.feed_eof()
- loop.remove_reader(master_r_fd)
+ async def _on_data():
+ while child.isalive():
+ try:
+ # Run blocking read in executor to not block event loop
+ data = await loop.run_in_executor(None, child.read, 1 << 16)
+ if data:
+ reader.feed_data(data.encode('utf-8') if isinstance(data, str) else data)
+ except EOFError:
+ break
+ except Exception:
+ await asyncio.sleep(0.01)
+ reader.feed_eof()
- loop.add_reader(master_r_fd, _on_data)
+ # Start pumping output in background
+ asyncio.create_task(_on_data())
class _Stdin:
def write(self, d):
- os.write(master_w_fd, d)
+ # Use winpty's write method, not os.write
+ if isinstance(d, bytes):
+ d = d.decode('utf-8', errors='replace')
+ # Windows needs \r\n for proper line endings
+ if _IS_WIN:
+ d = d.replace('\n', '\r\n')
+ child.write(d)
async def drain(self):
- await asyncio.sleep(0)
+ await asyncio.sleep(0.01) # Give write time to complete
- class _Proc(asyncio.subprocess.Process):
+ class _Proc:
def __init__(self):
self.stdin = _Stdin() # type: ignore
self.stdout = reader
self.pid = child.pid
+ self.returncode = None
async def wait(self):
while child.isalive():
await asyncio.sleep(0.2)
+ self.returncode = 0
return 0
+ def terminate(self):
+ if child.isalive():
+ child.terminate()
+
def kill(self):
- child.kill()
+ if child.isalive():
+ child.kill()
return _Proc()
@@ -259,7 +272,7 @@ def kill(self):
if __name__ == "__main__":
async def interactive_shell():
- shell_cmd, prompt_hint = ("cmd.exe", "$") if _IS_WIN else ("/bin/bash", "$")
+ shell_cmd, prompt_hint = ("powershell.exe", ">") if _IS_WIN else ("/bin/bash", "$")
# echo=False β suppress the shellβs own echo of commands
term = TTYSession(shell_cmd)
diff --git a/python/helpers/tunnel_manager.py b/python/helpers/tunnel_manager.py
index 93e8ec635c..42a1d87cd0 100644
--- a/python/helpers/tunnel_manager.py
+++ b/python/helpers/tunnel_manager.py
@@ -1,6 +1,13 @@
-from flaredantic import FlareTunnel, FlareConfig, ServeoConfig, ServeoTunnel
+from flaredantic import (
+ FlareTunnel, FlareConfig,
+ ServeoConfig, ServeoTunnel,
+ MicrosoftTunnel, MicrosoftConfig,
+ notifier, NotifyData, NotifyEvent
+)
import threading
+from collections import deque
+from python.helpers.print_style import PrintStyle
# Singleton to manage the tunnel instance
class TunnelManager:
@@ -19,6 +26,35 @@ def __init__(self):
self.tunnel_url = None
self.is_running = False
self.provider = None
+ self.notifications = deque(maxlen=50)
+ self._subscribed = False
+
+ def _on_notify(self, data: NotifyData):
+ """Handle notifications from flaredantic"""
+ self.notifications.append({
+ "event": data.event.value,
+ "message": data.message,
+ "data": data.data
+ })
+
+ def _ensure_subscribed(self):
+ """Subscribe to flaredantic notifications if not already"""
+ if not self._subscribed:
+ notifier.subscribe(self._on_notify)
+ self._subscribed = True
+
+ def get_notifications(self):
+ """Get and clear pending notifications"""
+ notifications = list(self.notifications)
+ self.notifications.clear()
+ return notifications
+
+ def get_last_error(self):
+ """Check for recent error in notifications without clearing"""
+ for n in reversed(list(self.notifications)):
+ if n['event'] == NotifyEvent.ERROR.value:
+ return n['message']
+ return None
def start_tunnel(self, port=80, provider="serveo"):
"""Start a new tunnel or return the existing one's URL"""
@@ -26,6 +62,8 @@ def start_tunnel(self, port=80, provider="serveo"):
return self.tunnel_url
self.provider = provider
+ self._ensure_subscribed()
+ self.notifications.clear()
try:
# Start tunnel in a separate thread to avoid blocking
@@ -34,6 +72,9 @@ def run_tunnel():
if self.provider == "cloudflared":
config = FlareConfig(port=port, verbose=True)
self.tunnel = FlareTunnel(config)
+ elif self.provider == "microsoft":
+ config = MicrosoftConfig(port=port, verbose=True) # type: ignore
+ self.tunnel = MicrosoftTunnel(config)
else: # Default to serveo
config = ServeoConfig(port=port) # type: ignore
self.tunnel = ServeoTunnel(config)
@@ -42,23 +83,34 @@ def run_tunnel():
self.tunnel_url = self.tunnel.tunnel_url
self.is_running = True
except Exception as e:
- print(f"Error in tunnel thread: {str(e)}")
+ error_msg = str(e)
+ PrintStyle.error(f"Error in tunnel thread: {error_msg}")
+ self.notifications.append({
+ "event": NotifyEvent.ERROR.value,
+ "message": error_msg,
+ "data": None
+ })
tunnel_thread = threading.Thread(target=run_tunnel)
tunnel_thread.daemon = True
tunnel_thread.start()
- # Wait for tunnel to start (max 15 seconds instead of 5)
- for _ in range(150): # Increased from 50 to 150 iterations
+ # Wait for tunnel to start (no timeout - user may need time for login)
+ import time
+ while True:
if self.tunnel_url:
break
- import time
-
+ # Check if we have errors
+ if any(n['event'] == NotifyEvent.ERROR.value for n in self.notifications):
+ break
+ # Check if thread died without producing URL
+ if not tunnel_thread.is_alive():
+ break
time.sleep(0.1)
return self.tunnel_url
except Exception as e:
- print(f"Error starting tunnel: {str(e)}")
+ PrintStyle.error(f"Error starting tunnel: {str(e)}")
return None
def stop_tunnel(self):
diff --git a/python/helpers/update_check.py b/python/helpers/update_check.py
new file mode 100644
index 0000000000..ce083e1c9f
--- /dev/null
+++ b/python/helpers/update_check.py
@@ -0,0 +1,15 @@
+from python.helpers import git, runtime
+import hashlib
+
+async def check_version():
+ import httpx
+
+ current_version = git.get_version()
+ anonymized_id = hashlib.sha256(runtime.get_persistent_id().encode()).hexdigest()[:20]
+
+ url = "https://api.agent-zero.ai/a0-update-check"
+ payload = {"current_version": current_version, "anonymized_id": anonymized_id}
+ async with httpx.AsyncClient() as client:
+ response = await client.post(url, json=payload)
+ version = response.json()
+ return version
\ No newline at end of file
diff --git a/python/helpers/vector_db.py b/python/helpers/vector_db.py
index 20254685b8..8a813cabad 100644
--- a/python/helpers/vector_db.py
+++ b/python/helpers/vector_db.py
@@ -1,5 +1,4 @@
from typing import Any, List, Sequence
-import uuid
from langchain_community.vectorstores import FAISS
# faiss needs to be patched for python 3.12 on arm #TODO remove once not needed
@@ -14,8 +13,10 @@
DistanceStrategy,
)
from langchain.embeddings import CacheBackedEmbeddings
+from simpleeval import simple_eval
from agent import Agent
+from python.helpers import guids
class MyFaiss(FAISS):
@@ -98,7 +99,7 @@ async def search_by_metadata(self, filter: str, limit: int = 0) -> list[Document
return result
async def insert_documents(self, docs: list[Document]):
- ids = [str(uuid.uuid4()) for _ in range(len(docs))]
+ ids = [guids.generate_id() for _ in range(len(docs))]
if ids:
for doc, id in zip(docs, ids):
@@ -140,7 +141,7 @@ def cosine_normalizer(val: float) -> float:
def get_comparator(condition: str):
def comparator(data: dict[str, Any]):
try:
- result = eval(condition, {}, data)
+ result = simple_eval(condition, {}, data)
return result
except Exception as e:
# PrintStyle.error(f"Error evaluating condition: {e}")
diff --git a/python/helpers/wait.py b/python/helpers/wait.py
new file mode 100644
index 0000000000..83f2886193
--- /dev/null
+++ b/python/helpers/wait.py
@@ -0,0 +1,68 @@
+import asyncio
+from datetime import datetime, timezone
+
+from python.helpers.print_style import PrintStyle
+
+
+def format_remaining_time(total_seconds: float) -> str:
+ if total_seconds < 0:
+ total_seconds = 0
+
+ days, remainder = divmod(total_seconds, 86400)
+ hours, remainder = divmod(remainder, 3600)
+ minutes, seconds = divmod(remainder, 60)
+
+ days = int(days)
+ hours = int(hours)
+ minutes = int(minutes)
+
+ parts = []
+ if days > 0:
+ parts.append(f"{days}d")
+ if hours > 0:
+ parts.append(f"{hours}h")
+ if minutes > 0:
+ parts.append(f"{minutes}m")
+
+ if days > 0 or hours > 0:
+ if seconds >= 1:
+ parts.append(f"{int(seconds)}s")
+ elif minutes > 0:
+ if seconds >= 0.1:
+ parts.append(f"{seconds:.1f}s")
+ else:
+ parts.append(f"{total_seconds:.1f}s")
+
+ if not parts:
+ return "0.0s remaining"
+
+ return " ".join(parts) + " remaining"
+
+
+async def managed_wait(agent, target_time, is_duration_wait, log, get_heading_callback):
+
+ while datetime.now(timezone.utc) < target_time:
+ before_intervention = datetime.now(timezone.utc)
+ await agent.handle_intervention()
+ after_intervention = datetime.now(timezone.utc)
+
+ if is_duration_wait:
+ pause_duration = after_intervention - before_intervention
+ if pause_duration.total_seconds() > 1.5: # Adjust for pauses longer than the sleep cycle
+ target_time += pause_duration
+ PrintStyle.info(
+ f"Wait extended by {pause_duration.total_seconds():.1f}s to {target_time.isoformat()}...",
+ )
+
+ current_time = datetime.now(timezone.utc)
+ if current_time >= target_time:
+ break
+
+ remaining_seconds = (target_time - current_time).total_seconds()
+ if log:
+ log.update(heading=get_heading_callback(format_remaining_time(remaining_seconds)))
+ sleep_duration = min(1.0, remaining_seconds)
+
+ await asyncio.sleep(sleep_duration)
+
+ return target_time
diff --git a/python/tools/behaviour_adjustment.py b/python/tools/behaviour_adjustment.py
index 735a39dfa4..31fe67cd70 100644
--- a/python/tools/behaviour_adjustment.py
+++ b/python/tools/behaviour_adjustment.py
@@ -52,13 +52,13 @@ async def log_callback(content):
def get_custom_rules_file(agent: Agent):
- return memory.get_memory_subdir_abs(agent) + f"/behaviour.md"
+ return files.get_abs_path(memory.get_memory_subdir_abs(agent), "behaviour.md")
def read_rules(agent: Agent):
rules_file = get_custom_rules_file(agent)
if files.exists(rules_file):
- rules = files.read_prompt_file(rules_file)
+ rules = agent.read_prompt(rules_file)
return agent.read_prompt("agent.system.behaviour.md", rules=rules)
else:
rules = agent.read_prompt("agent.system.behaviour_default.md")
diff --git a/python/tools/browser_agent.py b/python/tools/browser_agent.py
index 949543a522..6d5f085b26 100644
--- a/python/tools/browser_agent.py
+++ b/python/tools/browser_agent.py
@@ -9,7 +9,7 @@
from python.helpers.browser_use import browser_use # type: ignore[attr-defined]
from python.helpers.print_style import PrintStyle
from python.helpers.playwright import ensure_playwright_binary
-from python.helpers.secrets import SecretsManager
+from python.helpers.secrets import get_secrets_manager
from python.extensions.message_loop_start._10_iteration_no import get_iter_no
from pydantic import BaseModel
import uuid
@@ -153,7 +153,7 @@ async def complete_task(params: DoneResult):
try:
- secrets_manager = SecretsManager.get_instance()
+ secrets_manager = get_secrets_manager(self.agent.context)
secrets_dict = secrets_manager.load_secrets()
self.use_agent = browser_use.Agent(
@@ -216,7 +216,7 @@ async def execute(self, message="", reset="", **kwargs):
self.guid = self.agent.context.generate_id() # short random id
reset = str(reset).lower().strip() == "true"
await self.prepare_state(reset=reset)
- message = SecretsManager.get_instance().mask_values(message, placeholder="{key}") # mask any potential passwords passed from A0 to browser-use to browser-use format
+ message = get_secrets_manager(self.agent.context).mask_values(message, placeholder="{key}") # mask any potential passwords passed from A0 to browser-use to browser-use format
task = self.state.start_task(message) if self.state else None
# wait for browser agent to finish and update progress with timeout
@@ -394,7 +394,7 @@ def update_progress(self, text):
def _mask(self, text: str) -> str:
try:
- return SecretsManager.get_instance().mask_values(text or "")
+ return get_secrets_manager(self.agent.context).mask_values(text or "")
except Exception as e:
return text or ""
diff --git a/python/tools/code_execution_tool.py b/python/tools/code_execution_tool.py
index 0b0a054167..a37056057b 100644
--- a/python/tools/code_execution_tool.py
+++ b/python/tools/code_execution_tool.py
@@ -3,7 +3,7 @@
import shlex
import time
from python.helpers.tool import Tool, Response
-from python.helpers import files, rfc_exchange
+from python.helpers import files, rfc_exchange, projects, runtime
from python.helpers.print_style import PrintStyle
from python.helpers.shell_local import LocalInteractiveSession
from python.helpers.shell_ssh import SSHInteractiveSession
@@ -12,21 +12,58 @@
from python.helpers.messages import truncate_text as truncate_text_agent
import re
+# Timeouts for python, nodejs, and terminal runtimes.
+CODE_EXEC_TIMEOUTS: dict[str, int] = {
+ "first_output_timeout": 30,
+ "between_output_timeout": 15,
+ "max_exec_timeout": 180,
+ "dialog_timeout": 5,
+}
+
+# Timeouts for output runtime.
+OUTPUT_TIMEOUTS: dict[str, int] = {
+ "first_output_timeout": 90,
+ "between_output_timeout": 45,
+ "max_exec_timeout": 300,
+ "dialog_timeout": 5,
+}
+
+@dataclass
+class ShellWrap:
+ id: int
+ session: LocalInteractiveSession | SSHInteractiveSession
+ running: bool
@dataclass
class State:
ssh_enabled: bool
- shells: dict[int, LocalInteractiveSession | SSHInteractiveSession]
+ shells: dict[int, ShellWrap]
class CodeExecution(Tool):
- async def execute(self, **kwargs):
+ # Common shell prompt regex patterns (add more as needed)
+ prompt_patterns = [
+        re.compile(r"\(venv\).+[$#] ?$"),  # (venv) ...$ or (venv) ...#
+ re.compile(r"root@[^:]+:[^#]+# ?$"), # root@container:~#
+ re.compile(r"[a-zA-Z0-9_.-]+@[^:]+:[^$#]+[$#] ?$"), # user@host:~$
+ re.compile(r"\(?.*\)?\s*PS\s+[^>]+> ?$"), # PowerShell prompt like (base) PS C:\...>
+ ]
+ # potential dialog detection
+ dialog_patterns = [
+ re.compile(r"Y/N", re.IGNORECASE), # Y/N anywhere in line
+ re.compile(r"yes/no", re.IGNORECASE), # yes/no anywhere in line
+ re.compile(r":\s*$"), # line ending with colon
+ re.compile(r"\?\s*$"), # line ending with question mark
+ ]
+
+ async def execute(self, **kwargs) -> Response:
await self.agent.handle_intervention() # wait for intervention and handle it, if paused
runtime = self.args.get("runtime", "").lower().strip()
session = int(self.args.get("session", 0))
+ self.allow_running = bool(self.args.get("allow_running", False))
if runtime == "python":
response = await self.execute_python_code(
@@ -42,7 +79,7 @@ async def execute(self, **kwargs):
)
elif runtime == "output":
response = await self.get_terminal_output(
- session=session, first_output_timeout=60, between_output_timeout=5
+ session=session, timeouts=OUTPUT_TIMEOUTS
)
elif runtime == "reset":
response = await self.reset_terminal(session=session)
@@ -81,18 +118,18 @@ async def prepare_state(self, reset=False, session: int | None = None):
# always reset state when ssh_enabled changes
if not self.state or self.state.ssh_enabled != self.agent.config.code_exec_ssh_enabled:
# initialize shells dictionary if not exists
- shells: dict[int, LocalInteractiveSession | SSHInteractiveSession] = {}
+ shells: dict[int, ShellWrap] = {}
else:
shells = self.state.shells.copy()
# Only reset the specified session if provided
if reset and session is not None and session in shells:
- await shells[session].close()
+ await shells[session].session.close()
del shells[session]
elif reset and not session:
# Close all sessions if full reset requested
for s in list(shells.keys()):
- await shells[s].close()
+ await shells[s].session.close()
shells = {}
# initialize local or remote interactive shell interface for session 0 if needed
@@ -109,11 +146,12 @@ async def prepare_state(self, reset=False, session: int | None = None):
self.agent.config.code_exec_ssh_port,
self.agent.config.code_exec_ssh_user,
pswd,
+ cwd=self.get_cwd(),
)
else:
- shell = LocalInteractiveSession()
+ shell = LocalInteractiveSession(cwd=self.get_cwd())
- shells[session] = shell
+ shells[session] = ShellWrap(id=session, session=shell, running=False)
await shell.connect()
self.state = State(shells=shells, ssh_enabled=self.agent.config.code_exec_ssh_enabled)
@@ -135,28 +173,35 @@ async def execute_nodejs_code(self, session: int, code: str, reset: bool = False
async def execute_terminal_command(
self, session: int, command: str, reset: bool = False
):
- prefix = "bash> " + self.format_command_for_output(command) + "\n\n"
+ prefix = ("bash>" if not runtime.is_windows() or self.agent.config.code_exec_ssh_enabled else "PS>") + self.format_command_for_output(command) + "\n\n"
return await self.terminal_session(session, command, reset, prefix)
async def terminal_session(
- self, session: int, command: str, reset: bool = False, prefix: str = ""
+ self, session: int, command: str, reset: bool = False, prefix: str = "", timeouts: dict | None = None
):
self.state = await self.prepare_state(reset=reset, session=session)
await self.agent.handle_intervention() # wait for intervention and handle it, if paused
+
+ # Check if session is running and handle it
+ if not self.allow_running:
+ if response := await self.handle_running_session(session):
+ return response
+
# try again on lost connection
for i in range(2):
try:
- await self.state.shells[session].send_command(command)
+ self.state.shells[session].running = True
+ await self.state.shells[session].session.send_command(command)
locl = (
" (local)"
- if isinstance(self.state.shells[session], LocalInteractiveSession)
+ if isinstance(self.state.shells[session].session, LocalInteractiveSession)
else (
" (remote)"
- if isinstance(self.state.shells[session], SSHInteractiveSession)
+ if isinstance(self.state.shells[session].session, SSHInteractiveSession)
else " (unknown)"
)
)
@@ -164,7 +209,7 @@ async def terminal_session(
PrintStyle(
background_color="white", font_color="#1B4F72", bold=True
).print(f"{self.agent.agent_name} code execution output{locl}")
- return await self.get_terminal_output(session=session, prefix=prefix)
+ return await self.get_terminal_output(session=session, prefix=prefix, timeouts=(timeouts or CODE_EXEC_TIMEOUTS))
except Exception as e:
if i == 1:
@@ -196,26 +241,18 @@ async def get_terminal_output(
max_exec_timeout=180, # hard cap on total runtime
sleep_time=0.1,
prefix="",
+ timeouts: dict | None = None,
):
# if not self.state:
self.state = await self.prepare_state(session=session)
- # Common shell prompt regex patterns (add more as needed)
- prompt_patterns = [
- re.compile(r"\(venv\).+[$#] ?$"), # (venv) ...$ or (venv) ...#
- re.compile(r"root@[^:]+:[^#]+# ?$"), # root@container:~#
- re.compile(r"[a-zA-Z0-9_.-]+@[^:]+:[^$#]+[$#] ?$"), # user@host:~$
- re.compile(r"bash-\d+\.\d+\$ ?$"), # bash-3.2$ (version can vary)
- ]
-
- # potential dialog detection
- dialog_patterns = [
- re.compile(r"Y/N", re.IGNORECASE), # Y/N anywhere in line
- re.compile(r"yes/no", re.IGNORECASE), # yes/no anywhere in line
- re.compile(r":\s*$"), # line ending with colon
- re.compile(r"\?\s*$"), # line ending with question mark
- ]
+ # Override timeouts if a dict is provided
+ if timeouts:
+ first_output_timeout = timeouts.get("first_output_timeout", first_output_timeout)
+ between_output_timeout = timeouts.get("between_output_timeout", between_output_timeout)
+ dialog_timeout = timeouts.get("dialog_timeout", dialog_timeout)
+ max_exec_timeout = timeouts.get("max_exec_timeout", max_exec_timeout)
start_time = time.time()
last_output_time = start_time
@@ -229,7 +266,7 @@ async def get_terminal_output(
while True:
await asyncio.sleep(sleep_time)
- full_output, partial_output = await self.state.shells[session].read_output(
+ full_output, partial_output = await self.state.shells[session].session.read_output(
timeout=1, reset_full_output=reset_full_output
)
reset_full_output = False # only reset once
@@ -241,6 +278,7 @@ async def get_terminal_output(
PrintStyle(font_color="#85C1E9").stream(partial_output)
# full_output += partial_output # Append new output
truncated_output = self.fix_full_output(full_output)
+ self.set_progress(truncated_output)
heading = self.get_heading_from_output(truncated_output, 0)
self.log.update(content=prefix + truncated_output, heading=heading)
last_output_time = now
@@ -252,7 +290,7 @@ async def get_terminal_output(
)
last_lines.reverse()
for idx, line in enumerate(last_lines):
- for pat in prompt_patterns:
+ for pat in self.prompt_patterns:
if pat.search(line.strip()):
PrintStyle.info(
"Detected shell prompt, returning output early."
@@ -262,6 +300,7 @@ async def get_terminal_output(
"\n".join(last_lines), idx + 1, True
)
self.log.update(heading=heading)
+ self.mark_session_idle(session)
return truncated_output
# Check for max execution time
@@ -308,7 +347,7 @@ async def get_terminal_output(
truncated_output.splitlines()[-2:] if truncated_output else []
)
for line in last_lines:
- for pat in dialog_patterns:
+ for pat in self.dialog_patterns:
if pat.search(line.strip()):
PrintStyle.info(
"Detected dialog prompt, returning output early."
@@ -331,6 +370,63 @@ async def get_terminal_output(
)
return response
+ async def handle_running_session(
+ self,
+ session=0,
+ reset_full_output=True,
+ prefix=""
+ ):
+ if not self.state or session not in self.state.shells:
+ return None
+ if not self.state.shells[session].running:
+ return None
+
+ full_output, _ = await self.state.shells[session].session.read_output(
+ timeout=1, reset_full_output=reset_full_output
+ )
+ truncated_output = self.fix_full_output(full_output)
+ self.set_progress(truncated_output)
+ heading = self.get_heading_from_output(truncated_output, 0)
+
+ last_lines = (
+ truncated_output.splitlines()[-3:] if truncated_output else []
+ )
+ last_lines.reverse()
+ for idx, line in enumerate(last_lines):
+ for pat in self.prompt_patterns:
+ if pat.search(line.strip()):
+ PrintStyle.info(
+ "Detected shell prompt, returning output early."
+ )
+ self.mark_session_idle(session)
+ return None
+
+ has_dialog = False
+ for line in last_lines:
+ for pat in self.dialog_patterns:
+ if pat.search(line.strip()):
+ has_dialog = True
+ break
+ if has_dialog:
+ break
+
+ if has_dialog:
+ sys_info = self.agent.read_prompt("fw.code.pause_dialog.md", timeout=1)
+ else:
+ sys_info = self.agent.read_prompt("fw.code.running.md", session=session)
+
+ response = self.agent.read_prompt("fw.code.info.md", info=sys_info)
+ if truncated_output:
+ response = truncated_output + "\n\n" + response
+ PrintStyle(font_color="#FFA500", bold=True).print(response)
+ self.log.update(content=prefix + response, heading=heading)
+ return response
+
+ def mark_session_idle(self, session: int = 0):
+ # Mark session as idle - command finished
+ if self.state and session in self.state.shells:
+ self.state.shells[session].running = False
+
async def reset_terminal(self, session=0, reason: str | None = None):
# Print the reason for the reset to the console if provided
if reason:
@@ -371,6 +467,17 @@ def fix_full_output(self, output: str):
# remove any single byte \xXX escapes
output = re.sub(r"(? tuple[str | None, str | None]:
+ context = self.agent.context
+ if not context:
+ return (None, None)
+ project_slug = get_context_project_name(context)
+ if not project_slug:
+ return (None, None)
+ try:
+ metadata = load_basic_project_data(project_slug)
+ color = metadata.get("color") or None
+ except Exception:
+ color = None
+ return project_slug, color
+
async def list_tasks(self, **kwargs) -> Response:
state_filter: list[str] | None = kwargs.get("state", None)
type_filter: list[str] | None = kwargs.get("type", None)
@@ -153,13 +168,17 @@ async def create_scheduled_task(self, **kwargs) -> Response:
if not re.match(cron_regex, task_schedule.to_crontab()):
return Response(message="Invalid cron expression: " + task_schedule.to_crontab(), break_loop=False)
+ project_slug, project_color = self._resolve_project_metadata()
+
task = ScheduledTask.create(
name=name,
system_prompt=system_prompt,
prompt=prompt,
attachments=attachments,
schedule=task_schedule,
- context_id=None if dedicated_context else self.agent.context.id
+ context_id=None if dedicated_context else self.agent.context.id,
+ project_name=project_slug,
+ project_color=project_color,
)
await TaskScheduler.get().add_task(task)
return Response(message=f"Scheduled task '{name}' created: {task.uuid}", break_loop=False)
@@ -172,13 +191,17 @@ async def create_adhoc_task(self, **kwargs) -> Response:
token: str = str(random.randint(1000000000000000000, 9999999999999999999))
dedicated_context: bool = kwargs.get("dedicated_context", False)
+ project_slug, project_color = self._resolve_project_metadata()
+
task = AdHocTask.create(
name=name,
system_prompt=system_prompt,
prompt=prompt,
attachments=attachments,
token=token,
- context_id=None if dedicated_context else self.agent.context.id
+ context_id=None if dedicated_context else self.agent.context.id,
+ project_name=project_slug,
+ project_color=project_color,
)
await TaskScheduler.get().add_task(task)
return Response(message=f"Adhoc task '{name}' created: {task.uuid}", break_loop=False)
@@ -206,6 +229,8 @@ async def create_planned_task(self, **kwargs) -> Response:
done=[]
)
+ project_slug, project_color = self._resolve_project_metadata()
+
# Create planned task with task plan
task = PlannedTask.create(
name=name,
@@ -213,7 +238,9 @@ async def create_planned_task(self, **kwargs) -> Response:
prompt=prompt,
attachments=attachments,
plan=task_plan,
- context_id=None if dedicated_context else self.agent.context.id
+ context_id=None if dedicated_context else self.agent.context.id,
+ project_name=project_slug,
+ project_color=project_color
)
await TaskScheduler.get().add_task(task)
return Response(message=f"Planned task '{name}' created: {task.uuid}", break_loop=False)
@@ -229,7 +256,7 @@ async def wait_for_task(self, **kwargs) -> Response:
return Response(message=f"Task not found: {task_uuid}", break_loop=False)
if task.context_id == self.agent.context.id:
- return Response(message="You can only wait for tasks running in a different chat context (dedicated_context=True).", break_loop=False)
+ return Response(message="You can only wait for tasks running in their own dedicated context.", break_loop=False)
done = False
elapsed = 0
diff --git a/python/tools/wait.py b/python/tools/wait.py
new file mode 100644
index 0000000000..000573c5f2
--- /dev/null
+++ b/python/tools/wait.py
@@ -0,0 +1,89 @@
+import asyncio
+from datetime import datetime, timedelta, timezone
+from python.helpers.tool import Tool, Response
+from python.helpers.print_style import PrintStyle
+from python.helpers.wait import managed_wait
+from python.helpers.localization import Localization
+
+class WaitTool(Tool):
+
+ async def execute(self, **kwargs) -> Response:
+ await self.agent.handle_intervention()
+
+ seconds = self.args.get("seconds", 0)
+ minutes = self.args.get("minutes", 0)
+ hours = self.args.get("hours", 0)
+ days = self.args.get("days", 0)
+ until_timestamp_str = self.args.get("until")
+
+ is_duration_wait = not bool(until_timestamp_str)
+
+ now = datetime.now(timezone.utc)
+ target_time = None
+
+ if until_timestamp_str:
+ try:
+ target_time = Localization.get().localtime_str_to_utc_dt(until_timestamp_str)
+ if not target_time:
+ raise ValueError(f"Invalid timestamp format: {until_timestamp_str}")
+ except ValueError as e:
+ return Response(
+ message=str(e),
+ break_loop=False,
+ )
+ else:
+ wait_duration = timedelta(
+ days=int(days),
+ hours=int(hours),
+ minutes=int(minutes),
+ seconds=int(seconds),
+ )
+ if wait_duration.total_seconds() <= 0:
+ return Response(
+ message="Wait duration must be positive.",
+ break_loop=False,
+ )
+ target_time = now + wait_duration
+
+ if target_time <= now:
+ return Response(
+ message=f"Target time {target_time.isoformat()} is in the past.",
+ break_loop=False,
+ )
+
+ PrintStyle.info(f"Waiting until {target_time.isoformat()}...")
+
+ target_time = await managed_wait(
+ agent=self.agent,
+ target_time=target_time,
+ is_duration_wait=is_duration_wait,
+ log=self.log,
+ get_heading_callback=self.get_heading
+ )
+
+ if self.log:
+ self.log.update(heading=self.get_heading("Done", done=True))
+
+ message = self.agent.read_prompt(
+ "fw.wait_complete.md",
+ target_time=target_time.isoformat()
+ )
+
+ return Response(
+ message=message,
+ break_loop=False,
+ )
+
+ def get_log_object(self):
+ return self.agent.context.log.log(
+ type="progress",
+ heading=self.get_heading(),
+ content="",
+ kvps=self.args,
+ )
+
+ def get_heading(self, text: str = "", done: bool = False):
+ done_icon = " icon://done_all" if done else ""
+ if not text:
+            text = "Waiting..."
+ return f"icon://timer Wait: {text}{done_icon}"
diff --git a/requirements.dev.txt b/requirements.dev.txt
new file mode 100644
index 0000000000..93bada7955
--- /dev/null
+++ b/requirements.dev.txt
@@ -0,0 +1,3 @@
+pytest>=8.4.2
+pytest-asyncio>=1.2.0
+pytest-mock>=3.15.1
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 17856f8b76..755cfcf5ec 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,7 @@ fastmcp==2.3.4
fasta2a==0.5.0
flask[async]==3.0.3
flask-basicauth==0.2.0
-flaredantic==0.1.4
+flaredantic==0.1.5
GitPython==3.1.43
inputimeout==1.0.4
kokoro>=0.9.2
@@ -33,8 +33,8 @@ unstructured-client==0.31.0
webcolors==24.6.0
nest-asyncio==1.6.0
crontab==1.0.1
-litellm==1.75.0
markdownify==1.1.0
+pydantic==2.11.7
pymupdf==1.25.3
pytesseract==0.3.13
pdf2image==1.17.0
@@ -42,3 +42,9 @@ crontab==1.0.1
pathspec>=0.12.1
psutil>=7.0.0
soundfile==0.13.1
+imapclient>=3.0.1
+html2text>=2024.2.26
+beautifulsoup4>=4.12.3
+boto3>=1.35.0
+exchangelib>=5.4.3
+pywinpty==3.0.2; sys_platform == "win32"
\ No newline at end of file
diff --git a/requirements2.txt b/requirements2.txt
new file mode 100644
index 0000000000..7256765d89
--- /dev/null
+++ b/requirements2.txt
@@ -0,0 +1,2 @@
+litellm==1.79.3
+openai==1.99.5
\ No newline at end of file
diff --git a/run_ui.py b/run_ui.py
index adbf75085d..1691f69e74 100644
--- a/run_ui.py
+++ b/run_ui.py
@@ -17,6 +17,7 @@
from python.helpers.extract_tools import load_classes_from_folder
from python.helpers.api import ApiHandler
from python.helpers.print_style import PrintStyle
+from python.helpers import login
# disable logging
import logging
@@ -116,24 +117,17 @@ async def decorated(*args, **kwargs):
return decorated
-def _get_credentials_hash():
- user = dotenv.get_dotenv_value("AUTH_LOGIN")
- password = dotenv.get_dotenv_value("AUTH_PASSWORD")
- if not user:
- return None
- return hashlib.sha256(f"{user}:{password}".encode()).hexdigest()
-
# require authentication for handlers
def requires_auth(f):
@wraps(f)
async def decorated(*args, **kwargs):
- user_pass_hash = _get_credentials_hash()
+ user_pass_hash = login.get_credentials_hash()
# If no auth is configured, just proceed
if not user_pass_hash:
return await f(*args, **kwargs)
if session.get('authentication') != user_pass_hash:
- return redirect(url_for('login'))
+ return redirect(url_for('login_handler'))
return await f(*args, **kwargs)
@@ -153,14 +147,14 @@ async def decorated(*args, **kwargs):
return decorated
@webapp.route("/login", methods=["GET", "POST"])
-async def login():
+async def login_handler():
error = None
if request.method == 'POST':
user = dotenv.get_dotenv_value("AUTH_LOGIN")
password = dotenv.get_dotenv_value("AUTH_PASSWORD")
if request.form['username'] == user and request.form['password'] == password:
- session['authentication'] = _get_credentials_hash()
+ session['authentication'] = login.get_credentials_hash()
return redirect(url_for('serve_index'))
else:
error = 'Invalid Credentials. Please try again.'
@@ -169,9 +163,9 @@ async def login():
return render_template_string(login_page_content, error=error)
@webapp.route("/logout")
-async def logout():
+async def logout_handler():
session.pop('authentication', None)
- return redirect(url_for('login'))
+ return redirect(url_for('login_handler'))
# handle default address, load index
@webapp.route("/", methods=["GET"])
diff --git a/tests/chunk_parser_test.py b/tests/chunk_parser_test.py
index 9297d2fbf3..fade5db55b 100644
--- a/tests/chunk_parser_test.py
+++ b/tests/chunk_parser_test.py
@@ -7,6 +7,10 @@
ex2 = "reasoning goes here None:
+ for entry, value in structure.items():
+ rel = os.path.join(base_rel, entry)
+ if isinstance(value, dict):
+ create_dir(rel)
+ materialize_structure(rel, value)
+ else:
+ write_file(rel, "" if value is None else str(value))
+
+
+def ensure_ignore_file(base_rel: str, content: str) -> None:
+ write_file(os.path.join(base_rel, ".treeignore"), content.strip() + "\n")
+
+
+def print_header(title: str, char: str = "=") -> None:
+ print(char * 80)
+ print(title)
+ print(char * 80)
+
+
+def print_flat(items: List[Dict[str, Any]]) -> None:
+ print("level type name text")
+ print("-" * 80)
+ for item in items:
+ level = item["level"]
+ item_type = item["type"]
+ name = item["name"]
+ text = item["text"]
+ print(f"{level:<5} {item_type:<7} {name:<20} {text}")
+
+
+def print_nested(items: List[Dict[str, Any]], root_label: str) -> None:
+ print(root_label)
+
+ def recurse(nodes: List[Dict[str, Any]], prefix: str) -> None:
+ total = len(nodes)
+ for index, node in enumerate(nodes):
+ is_last = index == total - 1
+ connector = "βββ " if is_last else "βββ "
+ label = node["name"] + ("/" if node["type"] == "folder" else "")
+ print(f"{prefix}{connector}{label} [{node['type']}]")
+ children = node.get("items") or []
+ if children:
+ child_prefix = prefix + (" " if is_last else "β ")
+ recurse(children, child_prefix)
+
+ recurse(items, "")
+
+
+@contextmanager
+def scenario_directory(name: str) -> Iterable[str]:
+ rel_path = os.path.join(BASE_TEMP_ROOT, name)
+ delete_dir(rel_path)
+ create_dir(rel_path)
+ try:
+ yield rel_path
+ finally:
+ delete_dir(rel_path)
+
+
+def _set_entry_times(relative_path: str, timestamp: float) -> None:
+ abs_path = get_abs_path(relative_path)
+ os.utime(abs_path, (timestamp, timestamp))
+ time.sleep(0.01)
+
+
+def _apply_timestamps(base_rel: str, paths: List[str], base_ts: Optional[float] = None) -> None:
+ if base_ts is None:
+ base_ts = time.time()
+ for offset, rel in enumerate(paths, start=1):
+ _set_entry_times(os.path.join(base_rel, rel), base_ts + offset)
+
+
+def list_scenarios(scenarios: List[Scenario]) -> None:
+ print("Available scenarios:")
+ for scenario in scenarios:
+ print(f" - {scenario.name}: {scenario.description}")
+
+
+def run_scenarios(selected: List[Scenario]) -> None:
+ create_dir(BASE_TEMP_ROOT)
+ for scenario in selected:
+ print_header(f"Scenario: {scenario.name} β {scenario.description}")
+ with scenario_directory(scenario.name) as base_rel:
+ materialize_structure(base_rel, scenario.structure)
+
+ if scenario.ignore_content:
+ ensure_ignore_file(base_rel, scenario.ignore_content)
+
+ if scenario.setup:
+ scenario.setup(base_rel)
+
+ for config in scenario.configs:
+ print_header(f"Configuration: {config.label}", "-")
+ params = {
+ "relative_path": base_rel,
+ "max_depth": 0,
+ "max_lines": 0,
+ "folders_first": True,
+ "max_folders": None,
+ "max_files": None,
+ "sort": (SORT_BY_MODIFIED, SORT_DESC),
+ **config.params,
+ }
+ output_mode = params.setdefault("output_mode", OUTPUT_MODE_STRING)
+ print("Parameters:")
+ print(f" output_mode : {output_mode}")
+ print(f" folders_first : {params['folders_first']}")
+ sort_key, sort_dir = params["sort"]
+ print(f" sort : key={sort_key}, direction={sort_dir}")
+ print(f" max_depth : {params['max_depth']}")
+ print(f" max_lines : {params['max_lines']}")
+ print(f" max_folders : {params['max_folders']}")
+ print(f" max_files : {params['max_files']}")
+ print(f" ignore : {params.get('ignore')}")
+ print()
+ result = file_tree(**params)
+
+ if output_mode == OUTPUT_MODE_STRING:
+ print(result)
+ elif output_mode == OUTPUT_MODE_FLAT:
+ print_flat(result) # type: ignore[arg-type]
+ elif output_mode == OUTPUT_MODE_NESTED:
+ print_nested(result, f"{scenario.name}/")
+ else:
+ print(f"(Unhandled output mode {output_mode!r})")
+
+ print()
+
+
+def build_scenarios() -> List[Scenario]:
+ scenarios: List[Scenario] = []
+
+ scenarios.append(
+ Scenario(
+ name="basic_breadth_first",
+ description="Default breadth-first traversal with mixed folders/files",
+ structure={
+ "alpha": {"alpha_file.txt": "alpha", "nested": {"inner.txt": "inner"}},
+ "beta": {"beta_file.txt": "beta"},
+ "zeta": {},
+ "a.txt": "A",
+ "b.txt": "B",
+ },
+ configs=[
+ Config(
+ "string β’ folders-first (name asc)",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ Config(
+ "string β’ folders-first disabled",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": False,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ Config(
+ "flat β’ folders-first",
+ {
+ "output_mode": OUTPUT_MODE_FLAT,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ Config(
+ "nested β’ folders-first",
+ {
+ "output_mode": OUTPUT_MODE_NESTED,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ ],
+ )
+ )
+
+ def setup_sorting(base_rel: str) -> None:
+ entries = [
+ "folder_alpha",
+ "folder_beta",
+ "file_first.txt",
+ "file_second.txt",
+ "file_third.txt",
+ ]
+ for index, entry in enumerate(entries, start=1):
+ abs_path = get_abs_path(os.path.join(base_rel, entry))
+ timestamp = 200_000_0000 + index
+ os.utime(abs_path, (timestamp, timestamp))
+
+ scenarios.append(
+ Scenario(
+ name="sorting_variants",
+ description="Demonstrate sorting by name and timestamp with folders/files",
+ structure={
+ "folder_alpha": {},
+ "folder_beta": {},
+ "file_first.txt": "",
+ "file_second.txt": "",
+ "file_third.txt": "",
+ },
+ configs=[
+ Config(
+ "string β’ sort by name asc",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ Config(
+ "string β’ sort by created desc",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": True,
+ "sort": (SORT_BY_CREATED, SORT_DESC),
+ },
+ ),
+ Config(
+ "flat β’ sort by modified asc",
+ {
+ "output_mode": OUTPUT_MODE_FLAT,
+ "folders_first": True,
+ "sort": (SORT_BY_MODIFIED, SORT_ASC),
+ },
+ ),
+ ],
+ setup=setup_sorting,
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="ignore_and_limits",
+ description="Ignore file semantics with max_folders/max_files summaries",
+ structure={
+ "src": {
+ "main.py": "print('hello')",
+ "utils.py": "pass",
+ "tmp.tmp": "",
+ "cache": {"cached.txt": "", "keep.txt": ""},
+ "modules": {"a.py": "", "b.py": "", "c.py": ""},
+ "pkg": {"alpha.py": "", "beta.py": "", "gamma.py": ""},
+ },
+ "logs": {"2024.log": "", "2025.log": ""},
+ "notes.md": "",
+ "guide.md": "",
+ "todo.md": "",
+ "build.tmp": "",
+ "archive": {},
+ "assets": {},
+ "sandbox": {},
+ "vendor": {},
+ },
+ ignore_content="\n".join(
+ ["*.tmp", "cache/", "!src/cache/keep.txt", "logs/", "!logs/2025.log"]
+ ),
+ configs=[
+ Config(
+ "string β’ folders-first with summaries",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": False,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 1,
+ "max_files": 2,
+ "max_lines": 12,
+ "ignore": "file:.treeignore",
+ },
+ ),
+ Config(
+ "nested β’ inspect truncated branches & comments",
+ {
+ "output_mode": OUTPUT_MODE_NESTED,
+ "folders_first": False,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 1,
+ "max_files": 2,
+ "max_lines": 12,
+ "ignore": "file:.treeignore",
+ },
+ ),
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="limits_exact_match",
+ description="Per-directory limits exactly met (no summary comments)",
+ structure={
+ "pkg": {
+ "a.py": "",
+ "b.py": "",
+ "dir1": {},
+ "dir2": {},
+ }
+ },
+ configs=[
+ Config(
+ "string β’ exact matches (no summaries)",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 2,
+ "max_files": 2,
+ },
+ ),
+ Config(
+ "flat β’ exact matches (no summaries)",
+ {
+ "output_mode": OUTPUT_MODE_FLAT,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 2,
+ "max_files": 2,
+ },
+ ),
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="single_overflow",
+ description="Single overflow entries promoted instead of summary comment",
+ structure={
+ "pkg": {
+ "dir_a": {},
+ "dir_b": {},
+ "file_a.txt": "",
+ }
+ },
+ configs=[
+ Config(
+ "string β’ single folder overflow",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 1,
+ },
+ ),
+ Config(
+ "string β’ single file overflow",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": False,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_files": 1,
+ },
+ ),
+ Config(
+ "flat β’ folders-first",
+ {
+ "output_mode": OUTPUT_MODE_FLAT,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 1,
+ },
+ ),
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="global_max_lines",
+ description="Global max_lines finishing current depth before truncation",
+ structure={
+ "layer1_a": {
+ "layer2_a": {
+ "layer3_a": {
+ "layer4_a": {"layer5_a.txt": ""},
+ }
+ }
+ },
+ "layer1_b": {
+ "layer2_b": {
+ "layer3_b": {
+ "layer4_b": {"layer5_b.txt": ""},
+ }
+ }
+ },
+ "root_file.txt": "",
+ },
+ configs=[
+ Config(
+ "string β’ max_lines=6",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "max_lines": 6,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ Config(
+ "nested β’ max_lines=6",
+ {
+ "output_mode": OUTPUT_MODE_NESTED,
+ "max_lines": 6,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="flat_files_first_limits",
+ description="Flat output with files-first ordering and per-directory summaries",
+ structure={
+ "dir1": {},
+ "dir2": {},
+ "dir3": {},
+ "dir4": {},
+ "a.txt": "",
+ "b.txt": "",
+ "c.txt": "",
+ },
+ configs=[
+ Config(
+ "flat β’ files-first with limits",
+ {
+ "output_mode": OUTPUT_MODE_FLAT,
+ "folders_first": False,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 1,
+ "max_files": 1,
+ },
+ )
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="flat_sort_created_max_lines",
+ description="Flat output sorted by created time with global max_lines",
+ structure={
+ "dirA": {"inner.txt": ""},
+ "file1.txt": "",
+ "file2.txt": "",
+ "file3.txt": "",
+ },
+ setup=lambda base_rel: _apply_timestamps(
+ base_rel,
+ [
+ "dirA",
+ os.path.join("dirA", "inner.txt"),
+ "file1.txt",
+ "file2.txt",
+ "file3.txt",
+ ],
+ base_ts=2_000_001_000,
+ ),
+ configs=[
+ Config(
+ "flat β’ sort by created desc, max_lines=4",
+ {
+ "output_mode": OUTPUT_MODE_FLAT,
+ "folders_first": True,
+ "sort": (SORT_BY_CREATED, SORT_DESC),
+ "max_lines": 4,
+ },
+ )
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="nested_files_first_limits",
+ description="Nested output with files-first ordering and per-directory summaries",
+ structure={
+ "dir": {"a.py": "", "b.py": "", "c.py": ""},
+ "folder_a": {"inner.txt": ""},
+ "folder_b": {},
+ "folder_c": {},
+ },
+ configs=[
+ Config(
+ "nested β’ files-first with limits",
+ {
+ "output_mode": OUTPUT_MODE_NESTED,
+ "folders_first": False,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 1,
+ "max_files": 1,
+ },
+ )
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="nested_max_depth_sort",
+ description="Nested output with created-time ordering and depth pruning",
+ structure={
+ "root": {
+ "branch": {
+ "leaf_a.txt": "",
+ "leaf_b.txt": "",
+ }
+ },
+ "alpha.txt": "",
+ },
+ setup=lambda base_rel: _apply_timestamps(
+ base_rel,
+ [
+ "root",
+ os.path.join("root", "branch"),
+ os.path.join("root", "branch", "leaf_a.txt"),
+ os.path.join("root", "branch", "leaf_b.txt"),
+ "alpha.txt",
+ ],
+ base_ts=2_000_010_000,
+ ),
+ configs=[
+ Config(
+ "nested β’ sort by created asc, max_depth=2",
+ {
+ "output_mode": OUTPUT_MODE_NESTED,
+ "folders_first": True,
+ "sort": (SORT_BY_CREATED, SORT_ASC),
+ "max_depth": 2,
+ },
+ )
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="string_additional_limits",
+ description="String output exercising files-first+max_lines and zero-limit semantics",
+ structure={
+ "dir": {"inner_a.txt": "", "inner_b.txt": ""},
+ "alpha.txt": "",
+ "beta.txt": "",
+ "gamma.txt": "",
+ },
+ setup=lambda base_rel: _apply_timestamps(
+ base_rel,
+ [
+ "dir",
+ os.path.join("dir", "inner_a.txt"),
+ os.path.join("dir", "inner_b.txt"),
+ "alpha.txt",
+ "beta.txt",
+ "gamma.txt",
+ ],
+ base_ts=2_000_020_000,
+ ),
+ configs=[
+ Config(
+ "string β’ files-first, sort=modified desc, max_lines=4",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": False,
+ "sort": (SORT_BY_MODIFIED, SORT_DESC),
+ "max_lines": 4,
+ },
+ ),
+ Config(
+ "string β’ zero file limit acts unlimited",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 2,
+ "max_files": 0,
+ },
+ ),
+ ],
+ )
+ )
+
+ stress_structure = {
+ "level1_a": {
+ "level2_a1": {
+ "leaf_a1_1.txt": "",
+ "leaf_a1_2.txt": "",
+ "leaf_a1_3.txt": "",
+ },
+ "level2_a2": {
+ "leaf_a2_1.txt": "",
+ "leaf_a2_2.txt": "",
+ "leaf_a2_3.txt": "",
+ },
+ "level2_a3": {
+ "subfolder_a3": {
+ "deep_a3_1.txt": "",
+ "deep_a3_2.txt": "",
+ "deep_a3_3.txt": "",
+ "subsubfolder_a3": {
+ "deep_a3_4.txt": "",
+ "deep_a3_5.txt": "",
+ },
+ "subsubfolder_a3_extra": {
+ "deep_a3_extra_1.txt": "",
+ "deep_a3_extra_2.txt": "",
+ },
+ },
+ "subfolder_a3_extra": {
+ "deep_extra_1.txt": "",
+ "deep_extra_2.txt": "",
+ },
+ "subfolder_a3_more": {
+ "deep_more_1.txt": "",
+ },
+ },
+ },
+ "level1_b": {
+ "level2_b1": {
+ "leaf_b1_1.txt": "",
+ "leaf_b1_2.txt": "",
+ },
+ "level2_b2": {
+ "leaf_b2_1.txt": "",
+ "leaf_b2_2.txt": "",
+ "leaf_b2_3.txt": "",
+ "leaf_b2_4.txt": "",
+ "leaf_b2_5.txt": "",
+ },
+ "level2_b3": {
+ "subfolder_b3": {
+ "deep_b3_1.txt": "",
+ "deep_b3_2.txt": "",
+ "deep_b3_3.txt": "",
+ "deep_b3_4.txt": "",
+ },
+ "subfolder_b3_extra": {
+ "deeper_b3_extra.txt": "",
+ "deeper_b3_extra_2.txt": "",
+ },
+ },
+ },
+ "level1_c": {
+ "level2_c1": {
+ "leaf_c1_1.txt": "",
+ "leaf_c1_2.txt": "",
+ "leaf_c1_3.txt": "",
+ "leaf_c1_4.txt": "",
+ "leaf_c1_5.txt": "",
+ },
+ "level2_c2": {
+ "subfolder_c2": {
+ "deep_c2_1.txt": "",
+ "deep_c2_2.txt": "",
+ },
+ "subfolder_c2_extra": {
+ "deep_c2_extra_1.txt": "",
+ },
+ },
+ },
+ "level1_d": {
+ "level2_d1": {
+ "leaf_d1_1.txt": "",
+ "leaf_d1_2.txt": "",
+ "leaf_d1_3.txt": "",
+ },
+ "level2_d2": {
+ "subfolder_d2": {
+ "deep_d2_1.txt": "",
+ "deep_d2_2.txt": "",
+ },
+ },
+ },
+ "root_file.txt": "",
+ "root_notes.md": "",
+ "root_file_2.txt": "",
+ "root_file_3.txt": "",
+ }
+
+ scenarios.append(
+ Scenario(
+ name="mixed_limits_baseline",
+ description="Full structure without truncation for comparison",
+ structure=stress_structure,
+ configs=[
+ Config(
+ "string β’ no limits baseline",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ Config(
+ "flat β’ no limits baseline",
+ {
+ "output_mode": OUTPUT_MODE_FLAT,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ Config(
+ "nested β’ no limits baseline",
+ {
+ "output_mode": OUTPUT_MODE_NESTED,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ },
+ ),
+ ],
+ )
+ )
+
+ scenarios.append(
+ Scenario(
+ name="mixed_limits_stress",
+ description="Same structure with local and global limits applied",
+ structure=stress_structure,
+ configs=[
+ Config(
+ "string β’ mixed local/global limits stress",
+ {
+ "output_mode": OUTPUT_MODE_STRING,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 2,
+ "max_files": 2,
+ "max_lines": 19,
+ },
+ ),
+ Config(
+ "flat β’ mixed limits stress",
+ {
+ "output_mode": OUTPUT_MODE_FLAT,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 2,
+ "max_files": 2,
+ "max_lines": 19,
+ },
+ ),
+ Config(
+ "nested β’ mixed limits stress",
+ {
+ "output_mode": OUTPUT_MODE_NESTED,
+ "folders_first": True,
+ "sort": (SORT_BY_NAME, SORT_ASC),
+ "max_folders": 2,
+ "max_files": 2,
+ "max_lines": 19,
+ },
+ ),
+ ],
+ )
+ )
+
+ return scenarios
+
+
+def parse_args() -> argparse.Namespace:
+ parser = argparse.ArgumentParser(
+ description="Visualize file_tree() outputs across configurations."
+ )
+ parser.add_argument(
+ "--scenario",
+ action="append",
+ dest="scenarios",
+ help="Scenario name to run (repeat for multiple). Default: run all.",
+ )
+ parser.add_argument(
+ "--list",
+ action="store_true",
+ help="List available scenarios and exit.",
+ )
+ return parser.parse_args()
+
+
+def main() -> None:
+ scenarios = build_scenarios()
+ args = parse_args()
+
+ if args.list:
+ list_scenarios(scenarios)
+ return
+
+ if args.scenarios:
+ name_map = {scenario.name: scenario for scenario in scenarios}
+ unknown = [name for name in args.scenarios if name not in name_map]
+ if unknown:
+ raise SystemExit(f"Unknown scenario(s): {', '.join(unknown)}")
+ selected = [name_map[name] for name in args.scenarios]
+ else:
+ selected = scenarios
+
+ run_scenarios(selected)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/usr/.gitkeep b/usr/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/webui/components/chat/attachments/attachmentsStore.js b/webui/components/chat/attachments/attachmentsStore.js
index bb159d2d1d..7e17b9b554 100644
--- a/webui/components/chat/attachments/attachmentsStore.js
+++ b/webui/components/chat/attachments/attachmentsStore.js
@@ -1,5 +1,6 @@
import { createStore } from "/js/AlpineStore.js";
import { fetchApi } from "/js/api.js";
+import { store as imageViewerStore } from "../../modals/image-viewer/image-viewer-store.js";
const model = {
// State properties
@@ -7,13 +8,6 @@ const model = {
hasAttachments: false,
dragDropOverlayVisible: false,
- // Image modal properties
- currentImageUrl: null,
- currentImageName: null,
- imageLoaded: false,
- imageError: false,
- zoomLevel: 1,
-
async init() {
await this.initialize();
},
@@ -358,7 +352,7 @@ const model = {
previewUrl: previewUrl,
clickHandler: () => {
if (this.isImageFile(filename)) {
- this.openImageModal(this.getServerImgUrl(filename), filename);
+ imageViewerStore.open(this.getServerImgUrl(filename), { name: filename });
} else {
this.downloadAttachment(filename);
}
@@ -380,7 +374,7 @@ const model = {
clickHandler: () => {
if (attachment.type === "image") {
const imageUrl = this.getServerImgUrl(attachment.name);
- this.openImageModal(imageUrl, attachment.name);
+ imageViewerStore.open(imageUrl, { name: attachment.name });
} else {
this.downloadAttachment(attachment.name);
}
@@ -425,50 +419,6 @@ const model = {
);
},
- // Image modal methods
- openImageModal(imageUrl, imageName) {
- this.currentImageUrl = imageUrl;
- this.currentImageName = imageName;
- this.imageLoaded = false;
- this.imageError = false;
- this.zoomLevel = 1;
-
- // Open the modal using the modals system
- if (window.openModal) {
- window.openModal("chat/attachments/imageModal.html");
- }
- },
-
- closeImageModal() {
- this.currentImageUrl = null;
- this.currentImageName = null;
- this.imageLoaded = false;
- this.imageError = false;
- this.zoomLevel = 1;
- },
-
- // Zoom controls
- zoomIn() {
- this.zoomLevel = Math.min(this.zoomLevel * 1.2, 5); // Max 5x zoom
- this.updateImageZoom();
- },
-
- zoomOut() {
- this.zoomLevel = Math.max(this.zoomLevel / 1.2, 0.1); // Min 0.1x zoom
- this.updateImageZoom();
- },
-
- resetZoom() {
- this.zoomLevel = 1;
- this.updateImageZoom();
- },
-
- updateImageZoom() {
- const img = document.querySelector(".modal-image");
- if (img) {
- img.style.transform = `scale(${this.zoomLevel})`;
- }
- },
};
const store = createStore("chatAttachments", model);
diff --git a/webui/components/chat/attachments/inputPreview.html b/webui/components/chat/attachments/inputPreview.html
index 0ed55365ae..5effd1f8da 100644
--- a/webui/components/chat/attachments/inputPreview.html
+++ b/webui/components/chat/attachments/inputPreview.html
@@ -1,5 +1,6 @@