diff --git a/.dockerignore b/.dockerignore index 790ff9ed4c..8092340583 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,9 +5,10 @@ # Large / generated data memory/** -# Logs & tmp +# Logs, tmp, usr logs/* tmp/* +usr/* # Knowledge directory – keep only default/ knowledge/** diff --git a/.gitignore b/.gitignore index 17d47e1c82..c33c0598cf 100644 --- a/.gitignore +++ b/.gitignore @@ -2,10 +2,12 @@ **/.DS_Store **/.env **/__pycache__/ +*.py[cod] **/.conda/ -#Ignore cursor rules +#Ignore IDE files .cursor/ +.windsurf/ # ignore test files in root dir /*.test.py @@ -20,8 +22,9 @@ memory/** # Handle logs directory logs/* -# Handle tmp directory +# Handle tmp and usr directory tmp/* +usr/* # Handle knowledge directory knowledge/** @@ -39,4 +42,7 @@ instruments/** # Global rule to include .gitkeep files anywhere !**/.gitkeep -agent_history.gif \ No newline at end of file + +# for browser-use +agent_history.gif + diff --git a/README.md b/README.md index b0d5c765ff..d2c0d9b4bc 100644 --- a/README.md +++ b/README.md @@ -29,14 +29,8 @@ Or see DeepWiki generated documentation:
-> ### 🚨 **IMPORTANT ANNOUNCEMENT** 🚨
-
-The original GitHub and DockerHub repositories for Agent Zero have been transferred to a new namespace:
-
-- **GitHub & DockerHub:** `agent0ai/agent-zero`
-
-From now on, please use this name for both `git clone` and `docker pull` commands.
-
+> ### 🚨 **PROJECTS!** 🚨
+Agent Zero now supports **Projects** – isolated workspaces with their own prompts, files, memory, and secrets, so you can create dedicated setups for each use case without mixing contexts.
@@ -87,6 +81,7 @@ From now on, please use this name for both `git clone` and `docker pull` command - The framework does not guide or limit the agent in any way. There are no hard-coded rails that agents have to follow. - Every prompt, every small message template sent to the agent in its communication loop can be found in the **prompts/** folder and changed. - Every default tool can be found in the **python/tools/** folder and changed or copied to create new predefined tools. +- **Automated configuration** via `A0_SET_` environment variables for deployment automation and easy setup. ![Prompts](/docs/res/prompts.png) @@ -172,6 +167,26 @@ docker run -p 50001:80 agent0ai/agent-zero ## 🎯 Changelog +### v0.9.7 - Projects +[Release video](https://youtu.be/RrTDp_v9V1c) +- Projects management + - Support for custom instructions + - Integration with memory, knowledge, files + - Project specific secrets +- New Welcome screen/Dashboard +- New Wait tool +- Subordinate agent configuration override support +- Support for multiple documents at once in document_query_tool +- Improved context on interventions +- Openrouter embedding support +- Frontend components refactor and polishing +- SSH metadata output fix +- Support for windows powershell in local TTY utility +- More efficient selective streaming for LLMs +- UI output length limit improvements + + + ### v0.9.6 - Memory Dashboard [Release video](https://youtu.be/sizjAq2-d9s) - Memory Management Dashboard diff --git a/agent.py b/agent.py index cb767a5a04..50d47e4c3b 100644 --- a/agent.py +++ b/agent.py @@ -8,11 +8,18 @@ from datetime import datetime, timezone from typing import Any, Awaitable, Coroutine, Dict, Literal from enum import Enum -import uuid import models -from python.helpers import extract_tools, files, errors, history, tokens -from python.helpers import dirty_json +from python.helpers import ( + extract_tools, + files, + errors, + history, + tokens, + context as context_helper, + dirty_json, + subagents +) from python.helpers.print_style import PrintStyle from langchain_core.prompts import ( @@ -53,13 +60,26 @@ def __init__( created_at: datetime | None = None, type: AgentContextType = AgentContextType.USER, last_message: datetime | None = None, + data: dict | None = None, + output_data: dict | None = None, + set_current: bool = False, ): - # build context + # initialize context self.id = id or AgentContext.generate_id() + existing = self._contexts.get(self.id, None) + if existing: + AgentContext.remove(self.id) + self._contexts[self.id] = self + if set_current: + AgentContext.set_current(self.id) + + # initialize state self.name = name self.config = config + self.data = data or {} + self.output_data = output_data or {} self.log = log or Log.Log() - self.agent0 = agent0 or Agent(0, self.config, self) + self.log.context = self self.paused = paused self.streaming_agent = streaming_agent self.task: DeferredTask | None = None @@ -67,18 +87,35 @@ def __init__( self.type = type AgentContext._counter += 1 self.no = AgentContext._counter - # set to start of unix epoch self.last_message = last_message or datetime.now(timezone.utc) - existing = self._contexts.get(self.id, None) - if existing: - AgentContext.remove(self.id) - self._contexts[self.id] = self + # initialize agent at last (context is complete now) + self.agent0 = agent0 or Agent(0, self.config, self) @staticmethod def get(id: str): return AgentContext._contexts.get(id, None) + @staticmethod + def use(id: str): + context = AgentContext.get(id) + if context: + AgentContext.set_current(id) 
+ else: + AgentContext.set_current("") + return context + + @staticmethod + def current(): + ctxid = context_helper.get_context_data("agent_context_id", "") + if not ctxid: + return None + return AgentContext.get(ctxid) + + @staticmethod + def set_current(ctxid: str): + context_helper.set_context_data("agent_context_id", ctxid) + @staticmethod def first(): if not AgentContext._contexts: @@ -92,7 +129,8 @@ def all(): @staticmethod def generate_id(): def generate_short_id(): - return ''.join(random.choices(string.ascii_letters + string.digits, k=8)) + return "".join(random.choices(string.ascii_letters + string.digits, k=8)) + while True: short_id = generate_short_id() if short_id not in AgentContext._contexts: @@ -102,6 +140,7 @@ def generate_short_id(): def get_notification_manager(cls): if cls._notification_manager is None: from python.helpers.notification import NotificationManager # type: ignore + cls._notification_manager = NotificationManager() return cls._notification_manager @@ -112,7 +151,23 @@ def remove(id: str): context.task.kill() return context - def serialize(self): + def get_data(self, key: str, recursive: bool = True): + # recursive is not used now, prepared for context hierarchy + return self.data.get(key, None) + + def set_data(self, key: str, value: Any, recursive: bool = True): + # recursive is not used now, prepared for context hierarchy + self.data[key] = value + + def get_output_data(self, key: str, recursive: bool = True): + # recursive is not used now, prepared for context hierarchy + return self.output_data.get(key, None) + + def set_output_data(self, key: str, value: Any, recursive: bool = True): + # recursive is not used now, prepared for context hierarchy + self.output_data[key] = value + + def output(self): return { "id": self.id, "name": self.name, @@ -132,6 +187,7 @@ def serialize(self): else Localization.get().serialize_datetime(datetime.fromtimestamp(0)) ), "type": self.type.value, + **self.output_data, } @staticmethod @@ -222,7 +278,6 @@ async def _process_chain(self, agent: "Agent", msg: "UserMessage|str", user=True agent.handle_critical_exception(e) - @dataclass class AgentConfig: chat_model: models.ModelConfig @@ -233,7 +288,9 @@ class AgentConfig: profile: str = "" memory_subdir: str = "" knowledge_subdirs: list[str] = field(default_factory=lambda: ["default", "custom"]) - browser_http_headers: dict[str, str] = field(default_factory=dict) # Custom HTTP headers for browser requests + browser_http_headers: dict[str, str] = field( + default_factory=dict + ) # Custom HTTP headers for browser requests code_exec_ssh_enabled: bool = True code_exec_ssh_addr: str = "localhost" code_exec_ssh_port: int = 55022 @@ -260,6 +317,7 @@ def __init__(self, **kwargs): self.last_response = "" self.params_temporary: dict = {} self.params_persistent: dict = {} + self.current_tool = None # override values with kwargs for key, value in kwargs.items(): @@ -306,6 +364,7 @@ def __init__( asyncio.run(self.call_extensions("agent_init")) async def monologue(self): + error_retries = 0 # counter for critical error retries while True: try: # loop data dictionary to pass to extensions @@ -332,7 +391,9 @@ async def monologue(self): prompt = await self.prepare_prompt(loop_data=self.loop_data) # call before_main_llm_call extensions - await self.call_extensions("before_main_llm_call", loop_data=self.loop_data) + await self.call_extensions( + "before_main_llm_call", loop_data=self.loop_data + ) async def reasoning_callback(chunk: str, full: str): await self.handle_intervention() @@ -341,7 
+402,9 @@ async def reasoning_callback(chunk: str, full: str): # Pass chunk and full data to extensions for processing stream_data = {"chunk": chunk, "full": full} await self.call_extensions( - "reasoning_stream_chunk", loop_data=self.loop_data, stream_data=stream_data + "reasoning_stream_chunk", + loop_data=self.loop_data, + stream_data=stream_data, ) # Stream masked chunk after extensions processed it if stream_data.get("chunk"): @@ -357,7 +420,9 @@ async def stream_callback(chunk: str, full: str): # Pass chunk and full data to extensions for processing stream_data = {"chunk": chunk, "full": full} await self.call_extensions( - "response_stream_chunk", loop_data=self.loop_data, stream_data=stream_data + "response_stream_chunk", + loop_data=self.loop_data, + stream_data=stream_data, ) # Stream masked chunk after extensions processed it if stream_data.get("chunk"): @@ -405,6 +470,7 @@ async def stream_callback(chunk: str, full: str): # exceptions inside message loop: except InterventionException as e: + error_retries = 0 # reset retry counter on user intervention pass # intervention message has been handled in handle_intervention(), proceed with conversation loop except RepairableException as e: # Forward repairable errors to the LLM, maybe it can fix them @@ -414,8 +480,10 @@ async def stream_callback(chunk: str, full: str): PrintStyle(font_color="red", padding=True).print(msg["message"]) self.context.log.log(type="error", content=msg["message"]) except Exception as e: - # Other exception kill the loop - self.handle_critical_exception(e) + # Retry critical exceptions before failing + error_retries = await self.retry_critical_exception( + e, error_retries + ) finally: # call message_loop_end extensions @@ -425,9 +493,13 @@ async def stream_callback(chunk: str, full: str): # exceptions outside message loop: except InterventionException as e: + error_retries = 0 # reset retry counter on user intervention pass # just start over except Exception as e: - self.handle_critical_exception(e) + # Retry critical exceptions before failing + error_retries = await self.retry_critical_exception( + e, error_retries + ) finally: self.context.streaming_agent = None # unset current streamer # call monologue_end extensions @@ -484,6 +556,30 @@ async def prepare_prompt(self, loop_data: LoopData) -> list[BaseMessage]: return full_prompt + async def retry_critical_exception( + self, e: Exception, error_retries: int, delay: int = 3, max_retries: int = 1 + ) -> int: + if error_retries >= max_retries: + self.handle_critical_exception(e) + + error_message = errors.format_error(e) + + self.context.log.log( + type="warning", content="Critical error occurred, retrying..." + ) + PrintStyle(font_color="orange", padding=True).print( + "Critical error occurred, retrying..." 
+ ) + await asyncio.sleep(delay) + agent_facing_error = self.read_prompt( + "fw.msg_critical_error.md", error_message=error_message + ) + self.hist_add_warning(message=agent_facing_error) + PrintStyle(font_color="orange", padding=True).print( + agent_facing_error + ) + return error_retries + 1 + def handle_critical_exception(self, exception: Exception): if isinstance(exception, HandledException): raise exception # Re-raise the exception to kill the loop @@ -522,28 +618,17 @@ async def get_system_prompt(self, loop_data: LoopData) -> list[str]: return system_prompt def parse_prompt(self, _prompt_file: str, **kwargs): - dirs = [files.get_abs_path("prompts")] - if ( - self.config.profile - ): # if agent has custom folder, use it and use default as backup - prompt_dir = files.get_abs_path("agents", self.config.profile, "prompts") - dirs.insert(0, prompt_dir) + dirs = subagents.get_paths(self, "prompts") prompt = files.parse_file( - _prompt_file, _directories=dirs, **kwargs + _prompt_file, _directories=dirs, _agent=self, **kwargs ) return prompt def read_prompt(self, file: str, **kwargs) -> str: - dirs = [files.get_abs_path("prompts")] - if ( - self.config.profile - ): # if agent has custom folder, use it and use default as backup - prompt_dir = files.get_abs_path("agents", self.config.profile, "prompts") - dirs.insert(0, prompt_dir) - prompt = files.read_prompt_file( - file, _directories=dirs, **kwargs - ) - prompt = files.remove_code_fences(prompt) + dirs = subagents.get_paths(self, "prompts") + prompt = files.read_prompt_file(file, _directories=dirs, _agent=self, **kwargs) + if files.is_full_json_template(prompt): + prompt = files.remove_code_fences(prompt) return prompt def get_data(self, field: str): @@ -558,8 +643,12 @@ def hist_add_message( self.last_message = datetime.now(timezone.utc) # Allow extensions to process content before adding to history content_data = {"content": content} - asyncio.run(self.call_extensions("hist_add_before", content_data=content_data, ai=ai)) - return self.history.add_message(ai=ai, content=content_data["content"], tokens=tokens) + asyncio.run( + self.call_extensions("hist_add_before", content_data=content_data, ai=ai) + ) + return self.history.add_message( + ai=ai, content=content_data["content"], tokens=tokens + ) def hist_add_user_message(self, message: UserMessage, intervention: bool = False): self.history.new_topic() # user message starts a new topic in history @@ -671,8 +760,10 @@ async def stream_callback(chunk: str, total: str): response, _reasoning = await call_data["model"].unified_call( system_message=call_data["system"], user_message=call_data["message"], - response_callback=stream_callback, - rate_limiter_callback=self.rate_limiter_callback if not call_data["background"] else None, + response_callback=stream_callback if call_data["callback"] else None, + rate_limiter_callback=( + self.rate_limiter_callback if not call_data["background"] else None + ), ) return response @@ -694,7 +785,9 @@ async def call_chat_model( messages=messages, reasoning_callback=reasoning_callback, response_callback=response_callback, - rate_limiter_callback=self.rate_limiter_callback if not background else None, + rate_limiter_callback=( + self.rate_limiter_callback if not background else None + ), ) return response, reasoning @@ -714,6 +807,13 @@ async def handle_intervention(self, progress: str = ""): ): # if there is an intervention message, but not yet processed msg = self.intervention self.intervention = None # reset the intervention message + # If a tool was running, 
save its progress to history + last_tool = self.loop_data.current_tool + if last_tool: + tool_progress = last_tool.progress.strip() + if tool_progress: + self.hist_add_tool_result(last_tool.name, tool_progress) + last_tool.set_progress(None) if progress.strip(): self.hist_add_ai_response(progress) # append the intervention message @@ -762,31 +862,44 @@ async def process_tools(self, msg: str): # Fallback to local get_tool if MCP tool was not found or MCP lookup failed if not tool: tool = self.get_tool( - name=tool_name, method=tool_method, args=tool_args, message=msg, loop_data=self.loop_data + name=tool_name, + method=tool_method, + args=tool_args, + message=msg, + loop_data=self.loop_data, ) if tool: - await self.handle_intervention() + self.loop_data.current_tool = tool # type: ignore + try: + await self.handle_intervention() + # Call tool hooks for compatibility + await tool.before_execution(**tool_args) + await self.handle_intervention() - # Call tool hooks for compatibility - await tool.before_execution(**tool_args) - await self.handle_intervention() + # Allow extensions to preprocess tool arguments + await self.call_extensions( + "tool_execute_before", + tool_args=tool_args or {}, + tool_name=tool_name, + ) - # Allow extensions to preprocess tool arguments - await self.call_extensions("tool_execute_before", tool_args=tool_args or {}, tool_name=tool_name) + response = await tool.execute(**tool_args) + await self.handle_intervention() - response = await tool.execute(**tool_args) - await self.handle_intervention() + # Allow extensions to postprocess tool response + await self.call_extensions( + "tool_execute_after", response=response, tool_name=tool_name + ) - # Allow extensions to postprocess tool response - await self.call_extensions("tool_execute_after", response=response, tool_name=tool_name) - - await tool.after_execution(response) - await self.handle_intervention() + await tool.after_execution(response) + await self.handle_intervention() - if response.break_loop: - return response.message + if response.break_loop: + return response.message + finally: + self.loop_data.current_tool = None else: error_detail = ( f"Tool '{raw_tool_name}' not found or could not be initialized." 
@@ -831,34 +944,40 @@ async def handle_response_stream(self, stream: str): pass def get_tool( - self, name: str, method: str | None, args: dict, message: str, loop_data: LoopData | None, **kwargs + self, + name: str, + method: str | None, + args: dict, + message: str, + loop_data: LoopData | None, + **kwargs, ): from python.tools.unknown import Unknown from python.helpers.tool import Tool classes = [] - # try agent tools first - if self.config.profile: + # search for tools in agent's folder hierarchy + paths = subagents.get_paths(self, "tools", name + ".py", default_root="python") + for path in paths: try: - classes = extract_tools.load_classes_from_file( - "agents/" + self.config.profile + "/tools/" + name + ".py", Tool # type: ignore[arg-type] - ) + classes = extract_tools.load_classes_from_file(path, Tool) # type: ignore[arg-type] + break except Exception: - pass + continue - # try default tools - if not classes: - try: - classes = extract_tools.load_classes_from_file( - "python/tools/" + name + ".py", Tool # type: ignore[arg-type] - ) - except Exception as e: - pass tool_class = classes[0] if classes else Unknown return tool_class( - agent=self, name=name, method=method, args=args, message=message, loop_data=loop_data, **kwargs + agent=self, + name=name, + method=method, + args=args, + message=message, + loop_data=loop_data, + **kwargs, ) async def call_extensions(self, extension_point: str, **kwargs) -> Any: - return await call_extensions(extension_point=extension_point, agent=self, **kwargs) + return await call_extensions( + extension_point=extension_point, agent=self, **kwargs + ) diff --git a/agents/agent0/agent.json b/agents/agent0/agent.json new file mode 100644 index 0000000000..4fa2cb2c12 --- /dev/null +++ b/agents/agent0/agent.json @@ -0,0 +1,5 @@ +{ + "title": "Agent 0", + "description": "Main agent of the system communicating directly with the user.", + "context": "" +} diff --git a/agents/default/agent.json b/agents/default/agent.json new file mode 100644 index 0000000000..846d2a679f --- /dev/null +++ b/agents/default/agent.json @@ -0,0 +1,5 @@ +{ + "title": "Default prompts", + "description": "Default prompt file templates. Should be inherited and overriden by specialized prompt profiles.", + "context": "" +} diff --git a/agents/developer/agent.json b/agents/developer/agent.json new file mode 100644 index 0000000000..8680176e36 --- /dev/null +++ b/agents/developer/agent.json @@ -0,0 +1,5 @@ +{ + "title": "Developer", + "description": "Agent specialized in complex software development.", + "context": "Use this agent for software development tasks, including writing code, debugging, refactoring, and architectural design." +} diff --git a/agents/hacker/agent.json b/agents/hacker/agent.json new file mode 100644 index 0000000000..cde645d798 --- /dev/null +++ b/agents/hacker/agent.json @@ -0,0 +1,5 @@ +{ + "title": "Hacker", + "description": "Agent specialized in cyber security and penetration testing.", + "context": "Use this agent for cybersecurity tasks such as penetration testing, vulnerability analysis, and security auditing." +} diff --git a/agents/researcher/agent.json b/agents/researcher/agent.json new file mode 100644 index 0000000000..e06a9639b5 --- /dev/null +++ b/agents/researcher/agent.json @@ -0,0 +1,5 @@ +{ + "title": "Researcher", + "description": "Agent specialized in research, data analysis and reporting.", + "context": "Use this agent for information gathering, data analysis, topic research, and generating comprehensive reports." 
+} diff --git a/conf/model_providers.yaml b/conf/model_providers.yaml index 0683cf0119..930c229965 100644 --- a/conf/model_providers.yaml +++ b/conf/model_providers.yaml @@ -27,6 +27,9 @@ chat: anthropic: name: Anthropic litellm_provider: anthropic + cometapi: + name: CometAPI + litellm_provider: cometapi deepseek: name: DeepSeek litellm_provider: deepseek @@ -35,8 +38,9 @@ chat: litellm_provider: github_copilot kwargs: extra_headers: - "Editor-Version": "vscode/1.85.1" - "Copilot-Integration-Id": "vscode-chat" + "Editor-Version": "vscode/1.85.1" + "Copilot-Integration-Id": "vscode-chat" + "Copilot-Vision-Request": "true" google: name: Google litellm_provider: gemini @@ -61,6 +65,9 @@ chat: azure: name: OpenAI Azure litellm_provider: azure + bedrock: + name: AWS Bedrock + litellm_provider: bedrock openrouter: name: OpenRouter litellm_provider: openrouter @@ -81,6 +88,16 @@ chat: xai: name: xAI litellm_provider: xai + zai: + name: Z.AI + litellm_provider: openai + kwargs: + api_base: https://api.z.ai/api/paas/v4 + zai_coding: + name: Z.AI Coding + litellm_provider: openai + kwargs: + api_base: https://api.z.ai/api/coding/paas/v4 other: name: Other OpenAI compatible litellm_provider: openai @@ -107,6 +124,18 @@ embedding: azure: name: OpenAI Azure litellm_provider: azure + bedrock: + name: AWS Bedrock + litellm_provider: bedrock + # TODO: OpenRouter not yet supported by LiteLLM, replace with native litellm_provider openrouter and remove api_base when ready + openrouter: + name: OpenRouter + litellm_provider: openai + kwargs: + api_base: https://openrouter.ai/api/v1 + extra_headers: + "HTTP-Referer": "https://agent-zero.ai/" + "X-Title": "Agent Zero" other: name: Other OpenAI compatible - litellm_provider: openai \ No newline at end of file + litellm_provider: openai diff --git a/conf/projects.default.gitignore b/conf/projects.default.gitignore new file mode 100644 index 0000000000..9a5f01f2ae --- /dev/null +++ b/conf/projects.default.gitignore @@ -0,0 +1,13 @@ +# A0 project meta folder +.a0proj/ + +# Python environments & cache +venv/ +**/__pycache__/ + +# Node.js dependencies +**/node_modules/ +**/.npm/ + +# Version control metadata +**/.git/ diff --git a/docker/base/fs/ins/install_python.sh b/docker/base/fs/ins/install_python.sh index 82f2fc3832..417395ebcc 100644 --- a/docker/base/fs/ins/install_python.sh +++ b/docker/base/fs/ins/install_python.sh @@ -20,7 +20,7 @@ python3.13 -m venv /opt/venv source /opt/venv/bin/activate # upgrade pip and install static packages -pip install --no-cache-dir --upgrade pip ipython requests +pip install --no-cache-dir --upgrade pip pipx ipython requests echo "====================PYTHON PYVENV====================" diff --git a/docker/run/fs/ins/install_A0.sh b/docker/run/fs/ins/install_A0.sh index 7b5d0d8073..0aeaf13ff8 100644 --- a/docker/run/fs/ins/install_A0.sh +++ b/docker/run/fs/ins/install_A0.sh @@ -36,6 +36,8 @@ fi # Install remaining A0 python packages uv pip install -r /git/agent-zero/requirements.txt +# override for packages that have unnecessarily strict dependencies +uv pip install -r /git/agent-zero/requirements2.txt # install playwright bash /ins/install_playwright.sh "$@" diff --git a/docs/connectivity.md b/docs/connectivity.md index 8cfbe250ec..a6f465eaba 100644 --- a/docs/connectivity.md +++ b/docs/connectivity.md @@ -25,6 +25,7 @@ Send messages to Agent Zero and receive responses. 
Supports text messages, file * `message` (string, required): The message to send * `attachments` (array, optional): Array of `{filename, base64}` objects * `lifetime_hours` (number, optional): Chat lifetime in hours (default: 24) +* `project` (string, optional): Project name to activate (only on first message) **Headers:** * `X-API-KEY` (required) @@ -169,6 +170,63 @@ async function sendWithAttachment() { sendWithAttachment(); ``` +#### Project Usage Example + +```javascript +// Working with projects +async function sendMessageWithProject() { + try { + // First message - activate project + const response = await fetch('YOUR_AGENT_ZERO_URL/api_message', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-API-KEY': 'YOUR_API_KEY' + }, + body: JSON.stringify({ + message: "Analyze the project structure", + project: "my-web-app" // Activates this project + }) + }); + + const data = await response.json(); + + if (response.ok) { + console.log('βœ… Project activated!'); + console.log('Context ID:', data.context_id); + console.log('Response:', data.response); + + // Continue conversation - project already set + const followUp = await fetch('YOUR_AGENT_ZERO_URL/api_message', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-API-KEY': 'YOUR_API_KEY' + }, + body: JSON.stringify({ + context_id: data.context_id, + message: "What files are in the project?" + // Do NOT include project field here - already set on first message + }) + }); + + const followUpData = await followUp.json(); + console.log('Follow-up response:', followUpData.response); + return followUpData; + } else { + console.error('❌ Error:', data.error); + return null; + } + } catch (error) { + console.error('❌ Request failed:', error); + return null; + } +} + +// Call the function +sendMessageWithProject(); +``` + --- ## `GET/POST /api_log_get` @@ -568,6 +626,30 @@ Below is an example of a `mcp.json` configuration file that a client could use t } ``` +### Project Support in MCP + +You can specify a project for MCP connections by including it in the URL path: + +```json +{ + "mcpServers": { + "agent-zero-with-project": { + "type": "sse", + "url": "YOUR_AGENT_ZERO_URL/mcp/t-YOUR_API_TOKEN/p-my-project-name/sse" + }, + "agent-zero-http-with-project": { + "type": "streamable-http", + "url": "YOUR_AGENT_ZERO_URL/mcp/t-YOUR_API_TOKEN/p-my-project-name/http/" + } + } +} +``` + +When a project is specified in the URL: +- All new chats will be created within that project context +- The agent will have access to project-specific instructions, knowledge, and file structure +- Attempting to use an existing chat_id from a different project will result in an error + --- ## A2A (Agent-to-Agent) Connectivity @@ -583,3 +665,14 @@ To connect another agent to your Agent Zero instance, use the following URL form ``` YOUR_AGENT_ZERO_URL/a2a/t-YOUR_API_TOKEN ``` + +To connect with a specific project active: + +``` +YOUR_AGENT_ZERO_URL/a2a/t-YOUR_API_TOKEN/p-PROJECT_NAME +``` + +When a project is specified: +- All A2A conversations will run in the context of that project +- The agent will have access to project-specific resources, instructions, and knowledge +- This enables project-isolated agent-to-agent communication diff --git a/docs/development.md b/docs/development.md index 54fe39580e..9faa1805bd 100644 --- a/docs/development.md +++ b/docs/development.md @@ -149,6 +149,20 @@ You're now ready to contribute to Agent Zero, create custom extensions, or modif - See [extensibility](extensibility.md) for instructions 
on how to create custom extensions. - See [contribution](contribution.md) for instructions on how to contribute to the framework. +## Configuration via Environment Variables + +For development and testing, you can override default settings using the `.env` file with `A0_SET_` prefixed variables: + +```env +# Add to your .env file +A0_SET_chat_model_provider=ollama +A0_SET_chat_model_name=llama3.2 +A0_SET_chat_model_api_base=http://localhost:11434 +A0_SET_memory_recall_interval=5 +``` + +These environment variables automatically override the hardcoded defaults in `get_default_settings()` without modifying code. Useful for testing different configurations or multi-environment setups. + ## Want to build your docker image? - You can use the `DockerfileLocal` to build your docker image. - Navigate to your project root in the terminal and run `docker build -f DockerfileLocal -t agent-zero-local --build-arg CACHE_DATE=$(date +%Y-%m-%d:%H:%M:%S) .` diff --git a/docs/extensibility.md b/docs/extensibility.md index bf70d38975..db10a2a887 100644 --- a/docs/extensibility.md +++ b/docs/extensibility.md @@ -213,6 +213,11 @@ Agent Zero supports creating specialized subagents with customized behavior. The - `/agents/{agent_profile}/extensions/` - for custom extensions - `/agents/{agent_profile}/tools/` - for custom tools - `/agents/{agent_profile}/prompts/` - for custom prompts + - `/agents/{agent_profile}/settings.json` - for agent-specific configuration overrides + +The `settings.json` file for an agent uses the same structure as `tmp/settings.json`, but you only need to specify the fields you want to override. Any field omitted from the agent-specific `settings.json` will continue to use the global value. + +This allows power users to, for example, change the AI model, context window size, or other settings for a single agent without affecting the rest of the system. ### Example Subagent Structure @@ -223,15 +228,71 @@ Agent Zero supports creating specialized subagents with customized behavior. The β”‚ └── _10_example_extension.py β”œβ”€β”€ prompts/ β”‚ └── ... -└── tools/ - β”œβ”€β”€ example_tool.py - └── response.py +β”œβ”€β”€ tools/ +β”‚ β”œβ”€β”€ example_tool.py +β”‚ └── response.py +└── settings.json ``` In this example: - `_10_example_extension.py` is an extension that renames the agent when initialized - `response.py` overrides the default response tool with custom behavior - `example_tool.py` is a new tool specific to this agent +- `settings.json` overrides any global settings for this specific agent (only for the fields defined in this file) + +## Projects + +Projects provide isolated workspaces for individual chats, keeping prompts, memory, knowledge, files, and secrets scoped to a specific use case. 
+ +### Project Location and Structure + +- Projects are located under `/a0/usr/projects/` +- Each project has its own subdirectory, created by users via the UI +- A project can be backed up or restored by copying or downloading its entire directory + +Each project directory contains a hidden `.a0proj` folder with project metadata and configuration: + +``` +/a0/usr/projects/{project_name}/ +└── .a0proj/ + β”œβ”€β”€ project.json # project metadata and settings + β”œβ”€β”€ instructions/ # additional prompt/instruction files + β”œβ”€β”€ knowledge/ # files to be imported into memory + β”œβ”€β”€ memory/ # project-specific memory storage + β”œβ”€β”€ secrets.env # sensitive variables (secrets) + └── variables.env # non-sensitive variables +``` + +### Behavior When a Project Is Active in a Chat + +When a project is activated for a chat: + +- The agent is instructed to work **inside the project directory** +- Project prompts (instructions) from `.a0proj/instructions/` are **automatically injected** into the context window (all text files are imported) +- Memory can be configured as **project-specific**, meaning: + - It does not mix with global memory + - The memory file is stored under `.a0proj/memory/` +- Files created or modified by the agent are located within the project directory + +The `.a0proj/knowledge/` folder contains files that are imported into the project’s memory, enabling project-focused knowledge bases. + +### Secrets and Variables + +Each project manages its own configuration values via environment files in `.a0proj/`: + +- `secrets.env` – **sensitive variables**, such as API keys or passwords +- `variables.env` – **non-sensitive variables**, such as configuration flags or identifiers + +These files allow you to keep credentials and configuration tightly scoped to a single project. + +### When to Use Projects + +Projects are the recommended way to create specialized workflows in Agent Zero when you need to: + +- Add specific instructions without affecting global behavior +- Isolate file context, knowledge, and memory for a particular task or client +- Keep passwords and other secrets scoped to a single workspace +- Run multiple independent flows side by side under the same Agent Zero installation ## Best Practices - Keep extensions focused on a single responsibility diff --git a/docs/installation.md b/docs/installation.md index b8688f0919..c611b1b798 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -93,9 +93,55 @@ The following user guide provides instructions for installing and running Agent - `/tmp/settings.json` - Your Agent Zero settings > [!TIP] -> Choose a location that's easy to access and backup. All your Agent Zero data +> Choose a location that's easy to access and backup. All your Agent Zero data > will be directly accessible in this directory. +### Automated Configuration via Environment Variables + +Agent Zero settings can be automatically configured using environment variables with the `A0_SET_` prefix in your `.env` file. This enables automated deployments without manual configuration. 
+ +**Usage:** +Add variables to your `.env` file in the format: +``` +A0_SET_{setting_name}={value} +``` + +**Examples:** +```env +# Model configuration +A0_SET_chat_model_provider=anthropic +A0_SET_chat_model_name=claude-3-5-sonnet-20241022 +A0_SET_chat_model_ctx_length=200000 + +# Memory settings +A0_SET_memory_recall_enabled=true +A0_SET_memory_recall_interval=5 + +# Agent configuration +A0_SET_agent_profile=custom +A0_SET_agent_memory_subdir=production +``` + +**Docker usage:** +When running Docker, you can pass these as environment variables: +```bash +docker run -p 50080:80 \ + -e A0_SET_chat_model_provider=anthropic \ + -e A0_SET_chat_model_name=claude-3-5-sonnet-20241022 \ + agent0ai/agent-zero +``` + +**Type conversion:** +- Strings are used as-is +- Numbers are automatically converted (e.g., "100000" becomes integer 100000) +- Booleans accept: true/false, 1/0, yes/no, on/off (case-insensitive) +- Dictionaries must be valid JSON (e.g., `{"temperature": "0"}`) + +**Notes:** +- These provide initial default values when settings.json doesn't exist or when new settings are added to the application. Once a value is saved in settings.json, it takes precedence over these environment variables. +- Sensitive settings (API keys, passwords) use their existing environment variables +- Container/process restart required for changes to take effect + 2.3. Run the container: - In Docker Desktop, go back to the "Images" tab - Click the `Run` button next to the `agent0ai/agent-zero` image diff --git a/docs/res/banner_high.png b/docs/res/banner_high.png new file mode 100644 index 0000000000..69e4155628 Binary files /dev/null and b/docs/res/banner_high.png differ diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 97ebd58fa8..487ec524d2 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -12,7 +12,7 @@ This page addresses frequently asked questions (FAQ) and provides troubleshootin Refer to the [Choosing your LLMs](installation.md#installing-and-using-ollama-local-models) section of the documentation for detailed instructions and examples for configuring different LLMs. Local models can be run using Ollama or LM Studio. > [!TIP] -> Some LLM providers offer free usage of their APIs, for example Groq, Mistral or SambaNova. +> Some LLM providers offer free usage of their APIs, for example Groq, Mistral, SambaNova or CometAPI. **6. How can I make Agent Zero retain memory between sessions?** Refer to the [How to update Agent Zero](installation.md#how-to-update-agent-zero) section of the documentation for instructions on how to update Agent Zero while retaining memory and data. 
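To make the `A0_SET_` type-conversion rules documented above concrete, here is a minimal sketch of how such variables could be collected from the environment and coerced. The helper names (`collect_a0_overrides`, `_coerce`) are hypothetical and only illustrate the documented rules; they are not the project's actual loader.

```python
import json
import os

A0_PREFIX = "A0_SET_"  # documented prefix for settings overrides


def _coerce(value: str):
    # booleans accept true/false, yes/no, on/off (case-insensitive);
    # "1"/"0" fall through to int conversion below, which a real loader
    # could disambiguate using the default setting's type
    lowered = value.strip().lower()
    if lowered in ("true", "yes", "on"):
        return True
    if lowered in ("false", "no", "off"):
        return False
    # numbers are converted automatically (e.g. "100000" -> 100000)
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        pass
    # dictionaries must be valid JSON, e.g. {"temperature": "0"}
    if value.strip().startswith("{"):
        try:
            return json.loads(value)
        except json.JSONDecodeError:
            pass
    # everything else stays a plain string
    return value


def collect_a0_overrides(environ=os.environ) -> dict:
    # gather every A0_SET_* variable, strip the prefix,
    # and return a dict keyed by setting name
    return {
        key[len(A0_PREFIX):]: _coerce(value)
        for key, value in environ.items()
        if key.startswith(A0_PREFIX)
    }
```

Since `initialize_agent()` gains an `override_settings` parameter in the next hunk, a dictionary built this way could plausibly be passed into it and merged via `settings.merge_settings()`; whether the built-in loader behaves exactly like this sketch is an assumption.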
diff --git a/initialize.py b/initialize.py index 3c42c952e5..ec26227fa9 100644 --- a/initialize.py +++ b/initialize.py @@ -4,8 +4,10 @@ from python.helpers.print_style import PrintStyle -def initialize_agent(): +def initialize_agent(override_settings: dict | None = None): current_settings = settings.get_settings() + if override_settings: + current_settings = settings.merge_settings(current_settings, override_settings) def _normalize_model_kwargs(kwargs: dict) -> dict: # convert string values that represent valid Python numbers to numeric types diff --git a/models.py b/models.py index 12c36afb49..4676352c29 100644 --- a/models.py +++ b/models.py @@ -22,7 +22,7 @@ from python.helpers import dotenv from python.helpers import settings, dirty_json from python.helpers.dotenv import load_dotenv -from python.helpers.providers import get_provider_config +from python.helpers.providers import ModelType as ProviderModelType, get_provider_config from python.helpers.rate_limiter import RateLimiter from python.helpers.tokens import approximate_tokens from python.helpers import dirty_json, browser_use_monkeypatch @@ -41,6 +41,7 @@ ) from langchain.embeddings.base import Embeddings from sentence_transformers import SentenceTransformer +from pydantic import ConfigDict # disable extra logging, must be done repeatedly, otherwise browser-use will turn it back on for some reason @@ -106,17 +107,17 @@ def __init__(self, chunk: ChatChunk|None = None): def add_chunk(self, chunk: ChatChunk) -> ChatChunk: if chunk["reasoning_delta"]: self.native_reasoning = True - + # if native reasoning detection works, there's no need to worry about thinking tags if self.native_reasoning: processed_chunk = ChatChunk(response_delta=chunk["response_delta"], reasoning_delta=chunk["reasoning_delta"]) else: # if the model outputs thinking tags, we ned to parse them manually as reasoning processed_chunk = self._process_thinking_chunk(chunk) - + self.reasoning += processed_chunk["reasoning_delta"] self.response += processed_chunk["response_delta"] - + return processed_chunk def _process_thinking_chunk(self, chunk: ChatChunk) -> ChatChunk: @@ -145,7 +146,7 @@ def _process_thinking_tags(self, response: str, reasoning: str) -> ChatChunk: response = response[len(opening_tag):] self.thinking = True self.thinking_tag = closing_tag - + close_pos = response.find(closing_tag) if close_pos != -1: reasoning += response[:close_pos] @@ -164,7 +165,7 @@ def _process_thinking_tags(self, response: str, reasoning: str) -> ChatChunk: self.unprocessed = response response = "" break - + return ChatChunk(response_delta=response, reasoning_delta=reasoning) def _is_partial_opening_tag(self, text: str, opening_tag: str) -> bool: @@ -191,7 +192,7 @@ def output(self) -> ChatChunk: else: response += self.unprocessed return ChatChunk(response_delta=response, reasoning_delta=reasoning) - + rate_limiters: dict[str, RateLimiter] = {} api_keys_round_robin: dict[str, int] = {} @@ -293,10 +294,11 @@ class LiteLLMChatWrapper(SimpleChatModel): provider: str kwargs: dict = {} - class Config: - arbitrary_types_allowed = True - extra = "allow" # Allow extra attributes - validate_assignment = False # Don't validate on assignment + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="allow", + validate_assignment=False, + ) def __init__( self, @@ -487,6 +489,7 @@ async def unified_call( call_kwargs: dict[str, Any] = {**self.kwargs, **kwargs} max_retries: int = int(call_kwargs.pop("a0_retry_attempts", 2)) retry_delay_s: float = 
float(call_kwargs.pop("a0_retry_delay_seconds", 1.5)) + stream = reasoning_callback is not None or response_callback is not None or tokens_callback is not None # results result = ChatGenerationResult() @@ -499,48 +502,59 @@ async def unified_call( _completion = await acompletion( model=self.model_name, messages=msgs_conv, - stream=True, + stream=stream, **call_kwargs, ) - # iterate over chunks - async for chunk in _completion: # type: ignore - got_any_chunk = True - # parse chunk - parsed = _parse_chunk(chunk) + if stream: + # iterate over chunks + async for chunk in _completion: # type: ignore + got_any_chunk = True + # parse chunk + parsed = _parse_chunk(chunk) + output = result.add_chunk(parsed) + + # collect reasoning delta and call callbacks + if output["reasoning_delta"]: + if reasoning_callback: + await reasoning_callback(output["reasoning_delta"], result.reasoning) + if tokens_callback: + await tokens_callback( + output["reasoning_delta"], + approximate_tokens(output["reasoning_delta"]), + ) + # Add output tokens to rate limiter if configured + if limiter: + limiter.add(output=approximate_tokens(output["reasoning_delta"])) + # collect response delta and call callbacks + if output["response_delta"]: + if response_callback: + await response_callback(output["response_delta"], result.response) + if tokens_callback: + await tokens_callback( + output["response_delta"], + approximate_tokens(output["response_delta"]), + ) + # Add output tokens to rate limiter if configured + if limiter: + limiter.add(output=approximate_tokens(output["response_delta"])) + + # non-stream response + else: + parsed = _parse_chunk(_completion) output = result.add_chunk(parsed) - - # collect reasoning delta and call callbacks - if output["reasoning_delta"]: - if reasoning_callback: - await reasoning_callback(output["reasoning_delta"], result.reasoning) - if tokens_callback: - await tokens_callback( - output["reasoning_delta"], - approximate_tokens(output["reasoning_delta"]), - ) - # Add output tokens to rate limiter if configured - if limiter: - limiter.add(output=approximate_tokens(output["reasoning_delta"])) - # collect response delta and call callbacks - if output["response_delta"]: - if response_callback: - await response_callback(output["response_delta"], result.response) - if tokens_callback: - await tokens_callback( - output["response_delta"], - approximate_tokens(output["response_delta"]), - ) - # Add output tokens to rate limiter if configured - if limiter: + if limiter: + if output["response_delta"]: limiter.add(output=approximate_tokens(output["response_delta"])) + if output["reasoning_delta"]: + limiter.add(output=approximate_tokens(output["reasoning_delta"])) # Successful completion of stream return result.response, result.reasoning except Exception as e: import asyncio - + # Retry only if no chunks received and error is transient if got_any_chunk or not _is_transient_litellm_error(e) or attempt >= max_retries: raise @@ -799,12 +813,16 @@ def _parse_chunk(chunk: Any) -> ChatChunk: message.get("content", "") if isinstance(message, dict) else getattr(message, "content", "") - ) + ) or "" reasoning_delta = ( delta.get("reasoning_content", "") if isinstance(delta, dict) else getattr(delta, "reasoning_content", "") - ) + ) or ( + message.get("reasoning_content", "") + if isinstance(message, dict) + else getattr(message, "reasoning_content", "") + ) or "" return ChatChunk(reasoning_delta=reasoning_delta, response_delta=response_delta) @@ -826,7 +844,7 @@ def _adjust_call_args(provider_name: str, 
model_name: str, kwargs: dict): def _merge_provider_defaults( - provider_type: str, original_provider: str, kwargs: dict + provider_type: ProviderModelType, original_provider: str, kwargs: dict ) -> tuple[str, dict]: # Normalize .env-style numeric strings (e.g., "timeout=30") into ints/floats for LiteLLM def _normalize_values(values: dict) -> dict: diff --git a/prompts/agent.extras.project.file_structure.md b/prompts/agent.extras.project.file_structure.md new file mode 100644 index 0000000000..5167003f94 --- /dev/null +++ b/prompts/agent.extras.project.file_structure.md @@ -0,0 +1,9 @@ +# File structure of project {{project_name}} +- this is filtered overview not full scan +- list yourself if needed +- maximum depth: {{max_depth}} +- ignored: +{{gitignore}} + +## file tree +{{file_structure}} \ No newline at end of file diff --git a/prompts/agent.system.main.tips.md b/prompts/agent.system.main.tips.md index d1818c1a8a..1940d912f0 100644 --- a/prompts/agent.system.main.tips.md +++ b/prompts/agent.system.main.tips.md @@ -7,7 +7,7 @@ never assume success memory refers memory tools not own knowledge ## Files -save files in /root +when not in project save files in /root don't use spaces in file names ## Instruments diff --git a/prompts/agent.system.projects.active.md b/prompts/agent.system.projects.active.md new file mode 100644 index 0000000000..3d2ae63a91 --- /dev/null +++ b/prompts/agent.system.projects.active.md @@ -0,0 +1,12 @@ +## Active project +Path: {{project_path}} +Title: {{project_name}} +Description: {{project_description}} + + +### Important project instructions MUST follow +- always work inside {{project_path}} directory +- do not rename project directory do not change meta files in .a0proj folder +- cleanup when code accidentaly creates files outside move them + +{{project_instructions}} \ No newline at end of file diff --git a/prompts/agent.system.projects.inactive.md b/prompts/agent.system.projects.inactive.md new file mode 100644 index 0000000000..5cdd943900 --- /dev/null +++ b/prompts/agent.system.projects.inactive.md @@ -0,0 +1 @@ +no project currently activated \ No newline at end of file diff --git a/prompts/agent.system.projects.main.md b/prompts/agent.system.projects.main.md new file mode 100644 index 0000000000..1b6890de60 --- /dev/null +++ b/prompts/agent.system.projects.main.md @@ -0,0 +1,5 @@ +# Projects +- user can create and activate projects +- projects have work folder in /usr/projects/ and instructions and config in /usr/projects//.a0proj +- when activated agent works in project follows project instructions +- agent cannot manipulate or switch projects \ No newline at end of file diff --git a/prompts/agent.system.tool.call_sub.md b/prompts/agent.system.tool.call_sub.md index b2e267f932..c5c22dc75d 100644 --- a/prompts/agent.system.tool.call_sub.md +++ b/prompts/agent.system.tool.call_sub.md @@ -1,3 +1,4 @@ +{{if agent_profiles}} ### call_subordinate you can use subordinates for subtasks @@ -31,4 +32,5 @@ example usage - you might be part of long chain of subordinates, avoid slow and expensive rewriting subordinate responses, instead use `Β§Β§include()` alias to include the response as is **available profiles:** -{{agent_profiles}} \ No newline at end of file +{{agent_profiles}} +{{endif}} \ No newline at end of file diff --git a/prompts/agent.system.tool.call_sub.py b/prompts/agent.system.tool.call_sub.py index e840cca60e..946bd7f321 100644 --- a/prompts/agent.system.tool.call_sub.py +++ b/prompts/agent.system.tool.call_sub.py @@ -1,31 +1,34 @@ import json 
-from typing import Any +from typing import Any, TYPE_CHECKING from python.helpers.files import VariablesPlugin -from python.helpers import files +from python.helpers import files, projects, subagents from python.helpers.print_style import PrintStyle +if TYPE_CHECKING: + from agent import Agent -class CallSubordinate(VariablesPlugin): - def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]: - # collect all prompt profiles from subdirectories (_context.md file) - profiles = [] - agent_subdirs = files.get_subdirectories("agents", exclude=["_example"]) - for agent_subdir in agent_subdirs: - try: - context = files.read_prompt_file( - "_context.md", - [files.get_abs_path("agents", agent_subdir)] - ) - profiles.append({"name": agent_subdir, "context": context}) - except Exception as e: - PrintStyle().error(f"Error loading agent profile '{agent_subdir}': {e}") +class CallSubordinate(VariablesPlugin): + def get_variables( + self, file: str, backup_dirs: list[str] | None = None, **kwargs + ) -> dict[str, Any]: - # in case of no profiles - if not profiles: - # PrintStyle().error("No agent profiles found") - profiles = [ - {"name": "default", "context": "Default Agent-Zero AI Assistant"} - ] + # current agent instance + agent: Agent | None = kwargs.get("_agent", None) + # current project + project = projects.get_context_project_name(agent.context) if agent else None + # available agents in project (or global) + agents = subagents.get_available_agents_dict(project) - return {"agent_profiles": profiles} + if agents: + profiles = {} + for name, subagent in agents.items(): + profiles[name] = { + "title": subagent.title, + "description": subagent.description, + "context": subagent.context, + } + return {"agent_profiles": profiles} + else: + return {"agent_profiles": None} + diff --git a/prompts/agent.system.tool.document_query.md b/prompts/agent.system.tool.document_query.md index e6ca444326..9cbc51db30 100644 --- a/prompts/agent.system.tool.document_query.md +++ b/prompts/agent.system.tool.document_query.md @@ -1,60 +1,62 @@ -### document_query: -This tool can be used to read or analyze remote and local documents. -It can be used to: - * Get webpage or remote document text content - * Get local document text content - * Answer queries about a webpage, remote or local document -By default, when the "queries" argument is empty, this tool returns the text content of the document retrieved using OCR. -Additionally, you can pass a list of "queries" - in this case, the tool returns the answers to all the passed queries about the document. -!!! This is a universal document reader qnd query tool -!!! Supported document formats: HTML, PDF, Office Documents (word,excel, powerpoint), Textfiles and many more. +### document_query +read and analyze remote/local documents get text content or answer questions +pass a single url/path or a list for multiple documents in "document" +for web documents use "http://" or "https://"" prefix +for local files "file://" prefix is optional but full path is required +if "queries" is empty tool returns document content +if "queries" is a list of strings tool returns answers +supports various formats HTML PDF Office Text etc +usage: -#### Arguments: - * "document" (string) : The web address or local path to the document in question. Webdocuments need "http://" or "https://" protocol prefix. For local files the "file:" protocol prefix is optional. Local files MUST be passed with full filesystem path. 
- * "queries" (Optional, list[str]) : Optionally, here you can pass one or more queries to be answered (using and/or about) the document - -#### Usage example 1: -##### Request: -```json +1 get content +~~~json { "thoughts": [ - "...", + "I need to read..." ], - "headline": "Reading web document content", + "headline": "...", "tool_name": "document_query", "tool_args": { - "document": "https://...somexample", + "document": "https://.../document" } } -``` -##### Response: -```plaintext -... Here is the entire content of the web document requested ... -``` +~~~ -#### Usage example 2: -##### Request: -```json +2 query document +~~~json { "thoughts": [ - "...", + "I need to answer..." ], - "headline": "Analyzing document to answer specific questions", + "headline": "...", "tool_name": "document_query", "tool_args": { - "document": "https://...somexample", + "document": "https://.../document", "queries": [ - "What is the topic?", - "Who is the audience?" + "What is...", + "Who is..." ] } } -``` -##### Response: -```plaintext -# What is the topic? -... Description of the document topic ... +~~~ -# Who is the audience? -... The intended document audience list with short descriptions ... -``` +3 query multiple documents +~~~json +{ + "thoughts": [ + "I need to compare..." + ], + "headline": "...", + "tool_name": "document_query", + "tool_args": { + "document": [ + "https://.../document-one", + "file:///path/to/document-two" + ], + "queries": [ + "Compare the main conclusions...", + "What are the key differences..." + ] + } +} +~~~ diff --git a/prompts/agent.system.tool.wait.md b/prompts/agent.system.tool.wait.md new file mode 100644 index 0000000000..e8a30b0965 --- /dev/null +++ b/prompts/agent.system.tool.wait.md @@ -0,0 +1,34 @@ +### wait +pause execution for a set time or until a timestamp +use args "seconds" "minutes" "hours" "days" for duration +use "until" with ISO timestamp for a specific time +usage: + +1 wait duration +~~~json +{ + "thoughts": [ + "I need to wait..." + ], + "headline": "...", + "tool_name": "wait", + "tool_args": { + "minutes": 1, + "seconds": 30 + } +} +~~~ + +2 wait timestamp +~~~json +{ + "thoughts": [ + "I will wait until..." + ], + "headline": "...", + "tool_name": "wait", + "tool_args": { + "until": "2025-10-20T10:00:00Z" + } +} +~~~ diff --git a/prompts/agent.system.tools.py b/prompts/agent.system.tools.py index bfbe150779..f94544b304 100644 --- a/prompts/agent.system.tools.py +++ b/prompts/agent.system.tools.py @@ -5,8 +5,8 @@ from python.helpers.print_style import PrintStyle -class CallSubordinate(VariablesPlugin): - def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]: +class BuidToolsPrompt(VariablesPlugin): + def get_variables(self, file: str, backup_dirs: list[str] | None = None, **kwargs) -> dict[str, Any]: # collect all prompt folders in order of their priority folder = files.get_abs_path(os.path.dirname(file)) @@ -22,7 +22,7 @@ def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict tools = [] for prompt_file in prompt_files: try: - tool = files.read_prompt_file(prompt_file) + tool = files.read_prompt_file(prompt_file, **kwargs) tools.append(tool) except Exception as e: PrintStyle().error(f"Error loading tool '{prompt_file}': {e}") diff --git a/prompts/fw.code.running.md b/prompts/fw.code.running.md new file mode 100644 index 0000000000..94a8e93ec6 --- /dev/null +++ b/prompts/fw.code.running.md @@ -0,0 +1 @@ +Terminal session {{session}} is still running. 
Decide to wait for more 'output', 'reset', or use another session number based on situation. \ No newline at end of file diff --git a/prompts/fw.msg_critical_error.md b/prompts/fw.msg_critical_error.md new file mode 100644 index 0000000000..0bdeda139e --- /dev/null +++ b/prompts/fw.msg_critical_error.md @@ -0,0 +1 @@ +This error has occurred: {{error_message}}. Proceed with your original task if possible. \ No newline at end of file diff --git a/prompts/fw.wait_complete.md b/prompts/fw.wait_complete.md new file mode 100644 index 0000000000..3b6d6124de --- /dev/null +++ b/prompts/fw.wait_complete.md @@ -0,0 +1 @@ +Wait complete. Reached {{target_time}}. \ No newline at end of file diff --git a/python/api/api_log_get.py b/python/api/api_log_get.py index c09fdfdc0a..8111dbea5c 100644 --- a/python/api/api_log_get.py +++ b/python/api/api_log_get.py @@ -32,7 +32,7 @@ async def process(self, input: dict, request: Request) -> dict | Response: return Response('{"error": "context_id is required"}', status=400, mimetype="application/json") # Get context - context = AgentContext.get(context_id) + context = AgentContext.use(context_id) if not context: return Response('{"error": "Context not found"}', status=404, mimetype="application/json") diff --git a/python/api/api_message.py b/python/api/api_message.py index 4d3248c60a..c25cb53694 100644 --- a/python/api/api_message.py +++ b/python/api/api_message.py @@ -3,8 +3,9 @@ from datetime import datetime, timedelta from agent import AgentContext, UserMessage, AgentContextType from python.helpers.api import ApiHandler, Request, Response -from python.helpers import files +from python.helpers import files, projects from python.helpers.print_style import PrintStyle +from python.helpers.projects import activate_project from werkzeug.utils import secure_filename from initialize import initialize_agent import threading @@ -33,6 +34,13 @@ async def process(self, input: dict, request: Request) -> dict | Response: message = input.get("message", "") attachments = input.get("attachments", []) lifetime_hours = input.get("lifetime_hours", 24) # Default 24 hours + project_name = input.get("project_name", None) + agent_profile = input.get("agent_profile", None) + + # Set an agent if profile provided + override_settings = {} + if agent_profile: + override_settings["agent_profile"] = agent_profile if not message: return Response('{"error": "Message is required"}', status=400, mimetype="application/json") @@ -68,13 +76,44 @@ async def process(self, input: dict, request: Request) -> dict | Response: # Get or create context if context_id: - context = AgentContext.get(context_id) + context = AgentContext.use(context_id) if not context: return Response('{"error": "Context not found"}', status=404, mimetype="application/json") + + # Validation: if agent profile is provided, it must match the exising + if agent_profile and context.agent0.config.profile != agent_profile: + return Response('{"error": "Cannot override agent profile on existing context"}', status=400, mimetype="application/json") + + + # Validation: if project is provided but context already has different project + existing_project = context.get_data(projects.CONTEXT_DATA_KEY_PROJECT) + if project_name and existing_project and existing_project != project_name: + return Response('{"error": "Project can only be set on first message"}', status=400, mimetype="application/json") else: - config = initialize_agent() + config = initialize_agent(override_settings=override_settings) context = AgentContext(config=config, 
type=AgentContextType.USER) + AgentContext.use(context.id) context_id = context.id + # Activate project if provided + if project_name: + try: + activate_project(context_id, project_name) + except Exception as e: + # Handle project or context errors more gracefully + error_msg = str(e) + PrintStyle.error(f"Failed to activate project '{project_name}' for context '{context_id}': {error_msg}") + return Response( + f'{{"error": "Failed to activate project \\"{project_name}\\""}}', + status=500, + mimetype="application/json", + ) + + # Activate project if provided + if project_name: + try: + projects.activate_project(context_id, project_name) + except Exception as e: + return Response(f'{{"error": "Failed to activate project: {str(e)}"}}', status=400, mimetype="application/json") # Update chat lifetime with self._cleanup_lock: diff --git a/python/api/api_reset_chat.py b/python/api/api_reset_chat.py index b497adb1e0..bf0a10f8a3 100644 --- a/python/api/api_reset_chat.py +++ b/python/api/api_reset_chat.py @@ -35,7 +35,7 @@ async def process(self, input: dict, request: Request) -> dict | Response: ) # Check if context exists - context = AgentContext.get(context_id) + context = AgentContext.use(context_id) if not context: return Response( '{"error": "Chat context not found"}', diff --git a/python/api/api_terminate_chat.py b/python/api/api_terminate_chat.py index 3d6bcb4b4c..e746d84c5f 100644 --- a/python/api/api_terminate_chat.py +++ b/python/api/api_terminate_chat.py @@ -35,7 +35,7 @@ async def process(self, input: dict, request: Request) -> dict | Response: ) # Check if context exists - context = AgentContext.get(context_id) + context = AgentContext.use(context_id) if not context: return Response( '{"error": "Chat context not found"}', diff --git a/python/api/banners.py b/python/api/banners.py new file mode 100644 index 0000000000..88b171b8e2 --- /dev/null +++ b/python/api/banners.py @@ -0,0 +1,22 @@ +from python.helpers.api import ApiHandler, Request, Response +from python.helpers.extension import call_extensions + + +class GetBanners(ApiHandler): + """ + API endpoint for Welcome Screen banners. 
+ Add checks as extension scripts in python/extensions/banners/ or usr/extensions/banners/ + """ + + async def process(self, input: dict, request: Request) -> dict | Response: + banners = input.get("banners", []) + frontend_context = input.get("context", {}) + + # Banners array passed by reference - extensions append directly to it + await call_extensions("banners", agent=None, banners=banners, frontend_context=frontend_context) + + return {"banners": banners} + + @classmethod + def get_methods(cls) -> list[str]: + return ["POST"] diff --git a/python/api/chat_create.py b/python/api/chat_create.py new file mode 100644 index 0000000000..f73f3416d8 --- /dev/null +++ b/python/api/chat_create.py @@ -0,0 +1,32 @@ +from python.helpers.api import ApiHandler, Input, Output, Request, Response + + +from python.helpers import projects, guids +from agent import AgentContext + + +class CreateChat(ApiHandler): + async def process(self, input: Input, request: Request) -> Output: + current_ctxid = input.get("current_context", "") # current context id + new_ctxid = input.get("new_context", guids.generate_id()) # given or new guid + + # context instance - get or create + current_context = AgentContext.get(current_ctxid) + + # get/create new context + new_context = self.use_context(new_ctxid) + + # copy selected data from current to new context + if current_context: + current_data_1 = current_context.get_data(projects.CONTEXT_DATA_KEY_PROJECT) + if current_data_1: + new_context.set_data(projects.CONTEXT_DATA_KEY_PROJECT, current_data_1) + current_data_2 = current_context.get_output_data(projects.CONTEXT_DATA_KEY_PROJECT) + if current_data_2: + new_context.set_output_data(projects.CONTEXT_DATA_KEY_PROJECT, current_data_2) + + return { + "ok": True, + "ctxid": new_context.id, + "message": "Context created.", + } diff --git a/python/api/chat_export.py b/python/api/chat_export.py index 2817a4ea85..a82be6483e 100644 --- a/python/api/chat_export.py +++ b/python/api/chat_export.py @@ -8,7 +8,7 @@ async def process(self, input: Input, request: Request) -> Output: if not ctxid: raise Exception("No context id provided") - context = self.get_context(ctxid) + context = self.use_context(ctxid) content = persist_chat.export_json_chat(context) return { "message": "Chats exported.", diff --git a/python/api/chat_files_path_get.py b/python/api/chat_files_path_get.py new file mode 100644 index 0000000000..b230d31f73 --- /dev/null +++ b/python/api/chat_files_path_get.py @@ -0,0 +1,23 @@ +from python.helpers.api import ApiHandler, Request, Response +from python.helpers import files, memory, notification, projects, notification, runtime +import os +from werkzeug.utils import secure_filename + + +class GetChatFilesPath(ApiHandler): + async def process(self, input: dict, request: Request) -> dict | Response: + ctxid = input.get("ctxid", "") + if not ctxid: + raise Exception("No context id provided") + context = self.use_context(ctxid) + + project_name = projects.get_context_project_name(context) + if project_name: + folder = files.normalize_a0_path(projects.get_project_folder(project_name)) + else: + folder = "/root" # root in container + + return { + "ok": True, + "path": folder, + } \ No newline at end of file diff --git a/python/api/chat_remove.py b/python/api/chat_remove.py index a0b186e140..671e43d9ea 100644 --- a/python/api/chat_remove.py +++ b/python/api/chat_remove.py @@ -8,7 +8,10 @@ class RemoveChat(ApiHandler): async def process(self, input: Input, request: Request) -> Output: ctxid = input.get("context", "") - context = 
AgentContext.get(ctxid) + scheduler = TaskScheduler.get() + scheduler.cancel_tasks_by_context(ctxid, terminate_thread=True) + + context = AgentContext.use(ctxid) if context: # stop processing any tasks context.reset() @@ -16,7 +19,6 @@ async def process(self, input: Input, request: Request) -> Output: AgentContext.remove(ctxid) persist_chat.remove_chat(ctxid) - scheduler = TaskScheduler.get() await scheduler.reload() tasks = scheduler.get_tasks_by_context_id(ctxid) diff --git a/python/api/chat_reset.py b/python/api/chat_reset.py index 5086aacc1a..668b08e268 100644 --- a/python/api/chat_reset.py +++ b/python/api/chat_reset.py @@ -2,14 +2,18 @@ from python.helpers import persist_chat +from python.helpers.task_scheduler import TaskScheduler class Reset(ApiHandler): async def process(self, input: Input, request: Request) -> Output: ctxid = input.get("context", "") + # attempt to stop any scheduler tasks bound to this context + TaskScheduler.get().cancel_tasks_by_context(ctxid, terminate_thread=True) + # context instance - get or create - context = self.get_context(ctxid) + context = self.use_context(ctxid) context.reset() persist_chat.save_tmp_chat(context) persist_chat.remove_msg_files(ctxid) diff --git a/python/api/csrf_token.py b/python/api/csrf_token.py index bd8615546c..f4d1d63c0f 100644 --- a/python/api/csrf_token.py +++ b/python/api/csrf_token.py @@ -1,4 +1,5 @@ import secrets +from urllib.parse import urlparse from python.helpers.api import ( ApiHandler, Input, @@ -7,7 +8,11 @@ Response, session, ) -from python.helpers import runtime +from python.helpers import runtime, dotenv, login +import fnmatch + +ALLOWED_ORIGINS_KEY = "ALLOWED_ORIGINS" + class GetCsrfToken(ApiHandler): @@ -20,6 +25,124 @@ def requires_csrf(cls) -> bool: return False async def process(self, input: Input, request: Request) -> Output: + + # check for allowed origin to prevent dns rebinding attacks + origin_check = await self.check_allowed_origin(request) + if not origin_check["ok"]: + return { + "ok": False, + "error": f"Origin {self.get_origin_from_request(request)} not allowed when login is disabled. Set login and password or add your URL to ALLOWED_ORIGINS env variable. 
Currently allowed origins: {','.join(origin_check['allowed_origins'])}", + } + + # generate a csrf token if it doesn't exist if "csrf_token" not in session: session["csrf_token"] = secrets.token_urlsafe(32) - return {"token": session["csrf_token"], "runtime_id": runtime.get_runtime_id()} + + # return the csrf token and runtime id + return { + "ok": True, + "token": session["csrf_token"], + "runtime_id": runtime.get_runtime_id(), + } + + async def check_allowed_origin(self, request: Request): + # if login is required, this check is unnecessary + if login.is_login_required(): + return {"ok": True, "origin": "", "allowed_origins": ""} + # initialize allowed origins if not yet set + self.initialize_allowed_origins(request) + # otherwise, check if the origin is allowed + return await self.is_allowed_origin(request) + + async def is_allowed_origin(self, request: Request): + # get the origin from the request + origin = self.get_origin_from_request(request) + if not origin: + return {"ok": False, "origin": "", "allowed_origins": ""} + + # list of allowed origins + allowed_origins = await self.get_allowed_origins() + + # check if the origin is allowed + match = any( + fnmatch.fnmatch(origin, allowed_origin) + for allowed_origin in allowed_origins + ) + return {"ok": match, "origin": origin, "allowed_origins": allowed_origins} + + + def get_origin_from_request(self, request: Request): + # get from origin + r = request.headers.get("Origin") or request.environ.get("HTTP_ORIGIN") + if not r: + # try referer if origin not present + r = ( + request.headers.get("Referer") + or request.referrer + or request.environ.get("HTTP_REFERER") + ) + if not r: + return None + # parse and normalize + p = urlparse(r) + if not p.scheme or not p.hostname: + return None + return f"{p.scheme}://{p.hostname}" + (f":{p.port}" if p.port else "") + + async def get_allowed_origins(self) -> list[str]: + # get the allowed origins from the environment + allowed_origins = [ + origin.strip() + for origin in (dotenv.get_dotenv_value(ALLOWED_ORIGINS_KEY) or "").split(",") + if origin.strip() + ] + + # if there are no allowed origins, allow default localhosts + if not allowed_origins: + allowed_origins = self.get_default_allowed_origins() + + # always allow tunnel url if running + try: + from python.api.tunnel_proxy import process as tunnel_api_process + + tunnel = await tunnel_api_process({"action": "get"}) + if tunnel and isinstance(tunnel, dict) and tunnel["success"]: + allowed_origins.append(tunnel["tunnel_url"]) + except Exception: + pass + + return allowed_origins + + def get_default_allowed_origins(self) -> list[str]: + return ["*://localhost:*", "*://127.0.0.1:*", "*://0.0.0.0:*"] + + def initialize_allowed_origins(self, request: Request): + """ + If A0 is hosted on a server, add the first visit origin to ALLOWED_ORIGINS. + This simplifies deployment process as users can access their new instance without + additional setup while keeping it secure. 
+ """ + # dotenv value is already set, do nothing + denv = dotenv.get_dotenv_value(ALLOWED_ORIGINS_KEY) + if denv: + return + + # get the origin from the request + req_origin = self.get_origin_from_request(request) + if not req_origin: + return + + # check if the origin is allowed by default + allowed_origins = self.get_default_allowed_origins() + match = any( + fnmatch.fnmatch(req_origin, allowed_origin) + for allowed_origin in allowed_origins + ) + if match: + return + + # if not, add it to the allowed origins + allowed_origins.append(req_origin) + dotenv.save_dotenv_value(ALLOWED_ORIGINS_KEY, ",".join(allowed_origins)) + + \ No newline at end of file diff --git a/python/api/ctx_window_get.py b/python/api/ctx_window_get.py index 16a4438b7a..46573cb608 100644 --- a/python/api/ctx_window_get.py +++ b/python/api/ctx_window_get.py @@ -6,7 +6,7 @@ class GetCtxWindow(ApiHandler): async def process(self, input: Input, request: Request) -> Output: ctxid = input.get("context", []) - context = self.get_context(ctxid) + context = self.use_context(ctxid) agent = context.streaming_agent or context.agent0 window = agent.get_data(agent.DATA_NAME_CTX_WINDOW) if not window or not isinstance(window, dict): diff --git a/python/api/download_work_dir_file.py b/python/api/download_work_dir_file.py index 454ae5a12f..747c6a4940 100644 --- a/python/api/download_work_dir_file.py +++ b/python/api/download_work_dir_file.py @@ -7,6 +7,8 @@ from python.helpers.api import ApiHandler, Input, Output, Request from python.helpers import files, runtime from python.api import file_info +from urllib.parse import quote + def stream_file_download(file_source, download_name, chunk_size=8192): @@ -63,7 +65,7 @@ def generate(): content_type=content_type, direct_passthrough=True, # Prevent Flask from buffering the response headers={ - 'Content-Disposition': f'attachment; filename="{download_name}"', + 'Content-Disposition': make_disposition(download_name), 'Content-Length': str(file_size), # Critical for browser progress bars 'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no', # Disable nginx buffering @@ -74,6 +76,15 @@ def generate(): return response +def make_disposition(download_name: str) -> str: + # Basic ASCII fallback (strip or replace weird chars) + ascii_fallback = download_name.encode("ascii", "ignore").decode("ascii") or "download" + utf8_name = quote(download_name) # URL-encode UTF-8 bytes + + # RFC 5987: filename* with UTF-8 + return f'attachment; filename="{ascii_fallback}"; filename*=UTF-8\'\'{utf8_name}' + + class DownloadFile(ApiHandler): @classmethod diff --git a/python/api/get_work_dir_files.py b/python/api/get_work_dir_files.py index 1783e3d8a0..13cd428d4a 100644 --- a/python/api/get_work_dir_files.py +++ b/python/api/get_work_dir_files.py @@ -1,6 +1,6 @@ from python.helpers.api import ApiHandler, Request, Response from python.helpers.file_browser import FileBrowser -from python.helpers import runtime +from python.helpers import runtime, files class GetWorkDirFiles(ApiHandler): @@ -15,7 +15,7 @@ async def process(self, input: dict, request: Request) -> dict | Response: # current_path = "work_dir" # else: # current_path = "root" - current_path = "root" + current_path = "/a0" # browser = FileBrowser() # result = browser.get_files(current_path) diff --git a/python/api/history_get.py b/python/api/history_get.py index 7ff40e3fe9..608a523ecc 100644 --- a/python/api/history_get.py +++ b/python/api/history_get.py @@ -4,7 +4,7 @@ class GetHistory(ApiHandler): async def process(self, input: dict, request: Request) 
-> dict | Response: ctxid = input.get("context", []) - context = self.get_context(ctxid) + context = self.use_context(ctxid) agent = context.streaming_agent or context.agent0 history = agent.history.output_text() size = agent.history.get_tokens() diff --git a/python/api/import_knowledge.py b/python/api/import_knowledge.py index fd1b26542d..bfc25b6490 100644 --- a/python/api/import_knowledge.py +++ b/python/api/import_knowledge.py @@ -13,7 +13,7 @@ async def process(self, input: dict, request: Request) -> dict | Response: if not ctxid: raise Exception("No context id provided") - context = self.get_context(ctxid) + context = self.use_context(ctxid) file_list = request.files.getlist("files[]") KNOWLEDGE_FOLDER = files.get_abs_path(memory.get_custom_knowledge_subdir_abs(context.agent0), "main") diff --git a/python/api/knowledge_path_get.py b/python/api/knowledge_path_get.py new file mode 100644 index 0000000000..dadf0a692b --- /dev/null +++ b/python/api/knowledge_path_get.py @@ -0,0 +1,25 @@ +from python.helpers.api import ApiHandler, Request, Response +from python.helpers import files, memory, notification, projects, notification +import os +from werkzeug.utils import secure_filename + + +class GetKnowledgePath(ApiHandler): + async def process(self, input: dict, request: Request) -> dict | Response: + ctxid = input.get("ctxid", "") + if not ctxid: + raise Exception("No context id provided") + context = self.use_context(ctxid) + + project_name = projects.get_context_project_name(context) + if project_name: + knowledge_folder = projects.get_project_meta_folder(project_name, "knowledge") + else: + knowledge_folder = memory.get_custom_knowledge_subdir_abs(context.agent0) + + knowledge_folder = files.normalize_a0_path(knowledge_folder) + + return { + "ok": True, + "path": knowledge_folder, + } \ No newline at end of file diff --git a/python/api/knowledge_reindex.py b/python/api/knowledge_reindex.py new file mode 100644 index 0000000000..b86eafeaf1 --- /dev/null +++ b/python/api/knowledge_reindex.py @@ -0,0 +1,21 @@ +from python.helpers.api import ApiHandler, Request, Response +from python.helpers import files, memory, notification, projects, notification +import os +from werkzeug.utils import secure_filename + + +class ReindexKnowledge(ApiHandler): + async def process(self, input: dict, request: Request) -> dict | Response: + ctxid = input.get("ctxid", "") + if not ctxid: + raise Exception("No context id provided") + context = self.use_context(ctxid) + + # reload memory to re-import knowledge + await memory.Memory.reload(context.agent0) + context.log.set_initial_progress() + + return { + "ok": True, + "message": "Knowledge re-indexed", + } diff --git a/python/api/memory_dashboard.py b/python/api/memory_dashboard.py index a494227c8b..d1275fe417 100644 --- a/python/api/memory_dashboard.py +++ b/python/api/memory_dashboard.py @@ -1,8 +1,9 @@ from python.helpers.api import ApiHandler, Request, Response -from python.helpers.memory import Memory +from python.helpers.memory import Memory, get_existing_memory_subdirs, get_context_memory_subdir from python.helpers import files from models import ModelConfig, ModelType from langchain_core.documents import Document +from agent import AgentContext class MemoryDashboard(ApiHandler): @@ -113,21 +114,13 @@ async def _get_current_memory_subdir(self, input: dict) -> dict: # Fallback to default if no context available return {"success": True, "memory_subdir": "default"} - # Import AgentContext here to avoid circular imports - from agent import AgentContext - - # 
Get the context and extract memory subdirectory - context = AgentContext.get(context_id) - if ( - context - and hasattr(context, "config") - and hasattr(context.config, "memory_subdir") - ): - memory_subdir = context.config.memory_subdir or "default" - return {"success": True, "memory_subdir": memory_subdir} - else: + context = AgentContext.use(context_id) + if not context: return {"success": True, "memory_subdir": "default"} + memory_subdir = get_context_memory_subdir(context) + return {"success": True, "memory_subdir": memory_subdir or "default"} + except Exception: return { "success": True, # Still success, just fallback to default @@ -138,12 +131,7 @@ async def _get_memory_subdirs(self) -> dict: """Get available memory subdirectories.""" try: # Get subdirectories from memory folder - subdirs = files.get_subdirectories("memory", exclude="embeddings") - - # Ensure 'default' is always available - if "default" not in subdirs: - subdirs.insert(0, "default") - + subdirs = get_existing_memory_subdirs() return {"success": True, "subdirs": subdirs} except Exception as e: return { diff --git a/python/api/message.py b/python/api/message.py index 2f88f4784d..bd378e4f79 100644 --- a/python/api/message.py +++ b/python/api/message.py @@ -1,7 +1,7 @@ from agent import AgentContext, UserMessage from python.helpers.api import ApiHandler, Request, Response -from python.helpers import files +from python.helpers import files, extension import os from werkzeug.utils import secure_filename from python.helpers.defer import DeferredTask @@ -53,7 +53,13 @@ async def communicate(self, input: dict, request: Request): message = text # Obtain agent context - context = self.get_context(ctxid) + context = self.use_context(ctxid) + + # call extension point, alow it to modify data + data = { "message": message, "attachment_paths": attachment_paths } + await extension.call_extensions("user_message_ui", agent=context.get_agent(), data=data) + message = data.get("message", "") + attachment_paths = data.get("attachment_paths", []) # Store attachments in agent data # context.agent0.set_data("attachments", attachment_paths) diff --git a/python/api/nudge.py b/python/api/nudge.py index 64683da070..558734cdf0 100644 --- a/python/api/nudge.py +++ b/python/api/nudge.py @@ -6,7 +6,7 @@ async def process(self, input: dict, request: Request) -> dict | Response: if not ctxid: raise Exception("No context id provided") - context = self.get_context(ctxid) + context = self.use_context(ctxid) context.nudge() msg = "Process reset, agent nudged." 
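The `user_message_ui` extension point added to `python/api/message.py` above passes a mutable `data` dict (`message`, `attachment_paths`) to extensions before the message reaches the agent. Below is a minimal sketch of such a hook, assuming the same `Extension`/`execute(**kwargs)` shape used by the other extensions in this diff; the file path and the transformation itself are purely illustrative:

~~~python
# python/extensions/user_message_ui/_10_tag_message.py  (hypothetical path and behavior)
from python.helpers.extension import Extension


class TagUserMessage(Extension):
    """Illustrative hook: prepend a marker to each user message before the agent sees it."""

    async def execute(self, data: dict = {}, **kwargs):
        # 'data' is the same dict built in api/message.py, so mutating it in place is enough
        message = data.get("message", "")
        if message and not message.startswith("[ui] "):
            data["message"] = f"[ui] {message}"
        # data["attachment_paths"] could be filtered or rewritten the same way
~~~

Because `api/message.py` reads `message` and `attachment_paths` back out of the same dict after `call_extensions`, the hook does not need to return anything.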
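The origin check added to `python/api/csrf_token.py` earlier in this diff normalizes the request origin to `scheme://hostname[:port]` and matches it against the comma-separated `ALLOWED_ORIGINS` glob patterns with `fnmatch`. A self-contained sketch of that matching follows, using example values only; the localhost patterns are the defaults from `get_default_allowed_origins`, while `https://a0.example.com` and port 50001 are placeholders:

~~~python
from fnmatch import fnmatch
from urllib.parse import urlparse

# Example .env value: comma-separated glob patterns (placeholder domain)
ALLOWED_ORIGINS = "*://localhost:*,*://127.0.0.1:*,*://0.0.0.0:*,https://a0.example.com"


def normalize(origin_or_referer: str) -> str | None:
    # Same normalization as get_origin_from_request: scheme://hostname[:port]
    p = urlparse(origin_or_referer)
    if not p.scheme or not p.hostname:
        return None
    return f"{p.scheme}://{p.hostname}" + (f":{p.port}" if p.port else "")


def is_allowed(origin: str | None) -> bool:
    if not origin:
        return False
    patterns = [o.strip() for o in ALLOWED_ORIGINS.split(",") if o.strip()]
    return any(fnmatch(origin, pattern) for pattern in patterns)


print(is_allowed(normalize("http://localhost:50001/")))      # True, matches *://localhost:*
print(is_allowed(normalize("https://a0.example.com")))        # True, listed explicitly
print(is_allowed(normalize("http://rebound.example.net/x")))  # False, not listed
~~~

When login credentials are configured the check is skipped entirely, and on the first visit with no `ALLOWED_ORIGINS` set the visiting origin is persisted to the env file, so manual configuration is only needed for additional origins.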
diff --git a/python/api/pause.py b/python/api/pause.py index e4b20ecfb7..47b444e802 100644 --- a/python/api/pause.py +++ b/python/api/pause.py @@ -8,7 +8,7 @@ async def process(self, input: dict, request: Request) -> dict | Response: ctxid = input.get("context", "") # context instance - get or create - context = self.get_context(ctxid) + context = self.use_context(ctxid) context.paused = paused diff --git a/python/api/poll.py b/python/api/poll.py index 698321cbd4..dbe7105c66 100644 --- a/python/api/poll.py +++ b/python/api/poll.py @@ -18,10 +18,17 @@ async def process(self, input: dict, request: Request) -> dict | Response: timezone = input.get("timezone", get_dotenv_value("DEFAULT_USER_TIMEZONE", "UTC")) Localization.get().set_timezone(timezone) - # context instance - get or create - context = self.get_context(ctxid) - - logs = context.log.output(start=from_no) + # context instance - get or create only if ctxid is provided + if ctxid: + try: + context = self.use_context(ctxid, create_if_not_exists=False) + except Exception as e: + context = None + else: + context = None + + # Get logs only if we have a context + logs = context.log.output(start=from_no) if context else [] # Get notifications from global notification manager notification_manager = AgentContext.get_notification_manager() @@ -54,7 +61,7 @@ async def process(self, input: dict, request: Request) -> dict | Response: continue # Create the base context data that will be returned - context_data = ctx.serialize() + context_data = ctx.output() context_task = scheduler.get_task_by_uuid(ctx.id) # Determine if this is a task-dedicated context by checking if a task with this UUID exists @@ -102,15 +109,16 @@ async def process(self, input: dict, request: Request) -> dict | Response: # data from this server return { - "context": context.id, + "deselect_chat": ctxid and not context, + "context": context.id if context else "", "contexts": ctxs, "tasks": tasks, "logs": logs, - "log_guid": context.log.guid, - "log_version": len(context.log.updates), - "log_progress": context.log.progress, - "log_progress_active": context.log.progress_active, - "paused": context.paused, + "log_guid": context.log.guid if context else "", + "log_version": len(context.log.updates) if context else 0, + "log_progress": context.log.progress if context else 0, + "log_progress_active": context.log.progress_active if context else False, + "paused": context.paused if context else False, "notifications": notifications, "notifications_guid": notification_manager.guid, "notifications_version": len(notification_manager.updates), diff --git a/python/api/projects.py b/python/api/projects.py new file mode 100644 index 0000000000..3e06fadcdb --- /dev/null +++ b/python/api/projects.py @@ -0,0 +1,91 @@ +from python.helpers.api import ApiHandler, Input, Output, Request, Response +from python.helpers import projects + + +class Projects(ApiHandler): + async def process(self, input: Input, request: Request) -> Output: + action = input.get("action", "") + ctxid = input.get("context_id", None) + + if ctxid: + _context = self.use_context(ctxid) + + try: + if action == "list": + data = self.get_active_projects_list() + elif action == "load": + data = self.load_project(input.get("name", None)) + elif action == "create": + data = self.create_project(input.get("project", None)) + elif action == "update": + data = self.update_project(input.get("project", None)) + elif action == "delete": + data = self.delete_project(input.get("name", None)) + elif action == "activate": + data = 
self.activate_project(ctxid, input.get("name", None)) + elif action == "deactivate": + data = self.deactivate_project(ctxid) + elif action == "file_structure": + data = self.get_file_structure(input.get("name", None), input.get("settings")) + else: + raise Exception("Invalid action") + + return { + "ok": True, + "data": data, + } + except Exception as e: + return { + "ok": False, + "error": str(e), + } + + def get_active_projects_list(self): + return projects.get_active_projects_list() + + def create_project(self, project: dict|None): + if project is None: + raise Exception("Project data is required") + data = projects.BasicProjectData(**project) + name = projects.create_project(project["name"], data) + return projects.load_edit_project_data(name) + + def load_project(self, name: str|None): + if name is None: + raise Exception("Project name is required") + return projects.load_edit_project_data(name) + + def update_project(self, project: dict|None): + if project is None: + raise Exception("Project data is required") + data = projects.EditProjectData(**project) + name = projects.update_project(project["name"], data) + return projects.load_edit_project_data(name) + + def delete_project(self, name: str|None): + if name is None: + raise Exception("Project name is required") + return projects.delete_project(name) + + def activate_project(self, context_id: str|None, name: str|None): + if not context_id: + raise Exception("Context ID is required") + if not name: + raise Exception("Project name is required") + return projects.activate_project(context_id, name) + + def deactivate_project(self, context_id: str|None): + if not context_id: + raise Exception("Context ID is required") + return projects.deactivate_project(context_id) + + def get_file_structure(self, name: str|None, settings: dict|None): + if not name: + raise Exception("Project name is required") + # project data + basic_data = projects.load_basic_project_data(name) + # override file structure settings + if settings: + basic_data["file_structure"] = settings # type: ignore + # get structure + return projects.get_file_structure(name, basic_data) \ No newline at end of file diff --git a/python/api/scheduler_task_create.py b/python/api/scheduler_task_create.py index c091b3b198..48aeb24e89 100644 --- a/python/api/scheduler_task_create.py +++ b/python/api/scheduler_task_create.py @@ -3,6 +3,7 @@ TaskScheduler, ScheduledTask, AdHocTask, PlannedTask, TaskSchedule, serialize_task, parse_task_schedule, parse_task_plan, TaskType ) +from python.helpers.projects import load_basic_project_data from python.helpers.localization import Localization from python.helpers.print_style import PrintStyle import random @@ -27,7 +28,26 @@ async def process(self, input: Input, request: Request) -> Output: system_prompt = input.get("system_prompt", "") prompt = input.get("prompt") attachments = input.get("attachments", []) - context_id = input.get("context_id", None) + + requested_project_slug = input.get("project_name") + if isinstance(requested_project_slug, str): + requested_project_slug = requested_project_slug.strip() or None + else: + requested_project_slug = None + + project_slug = requested_project_slug + project_color = None + + if project_slug: + try: + metadata = load_basic_project_data(requested_project_slug) + project_color = metadata.get("color") or None + except Exception as exc: + printer.error(f"SchedulerTaskCreate: failed to load project '{project_slug}': {exc}") + return {"error": f"Saving project failed: {project_slug}"} + + # Always dedicated 
context for scheduler tasks created by ui + task_context_id = None # Check if schedule is provided (for ScheduledTask) schedule = input.get("schedule", {}) @@ -77,8 +97,10 @@ async def process(self, input: Input, request: Request) -> Output: prompt=prompt, schedule=task_schedule, attachments=attachments, - context_id=context_id, - timezone=timezone + context_id=task_context_id, + timezone=timezone, + project_name=project_slug, + project_color=project_color, ) elif plan: # Create a planned task @@ -94,7 +116,9 @@ async def process(self, input: Input, request: Request) -> Output: prompt=prompt, plan=task_plan, attachments=attachments, - context_id=context_id + context_id=task_context_id, + project_name=project_slug, + project_color=project_color, ) else: # Create an ad-hoc task @@ -105,7 +129,9 @@ async def process(self, input: Input, request: Request) -> Output: prompt=prompt, token=token, attachments=attachments, - context_id=context_id + context_id=task_context_id, + project_name=project_slug, + project_color=project_color, ) # Verify token after creation if isinstance(task, AdHocTask): @@ -132,5 +158,6 @@ async def process(self, input: Input, request: Request) -> Output: printer.print(f"Serialized adhoc task, token in response: '{task_dict.get('token')}'") return { + "ok": True, "task": task_dict } diff --git a/python/api/scheduler_task_delete.py b/python/api/scheduler_task_delete.py index 59e7187992..5e41a0bd61 100644 --- a/python/api/scheduler_task_delete.py +++ b/python/api/scheduler_task_delete.py @@ -30,10 +30,11 @@ async def process(self, input: Input, request: Request) -> Output: context = None if task.context_id: - context = self.get_context(task.context_id) + context = self.use_context(task.context_id) # If the task is running, update its state to IDLE first if task.state == TaskState.RUNNING: + scheduler.cancel_running_task(task_id, terminate_thread=True) if context: context.reset() # Update the state to IDLE so any ongoing processes know to terminate diff --git a/python/api/scheduler_task_update.py b/python/api/scheduler_task_update.py index 433738652e..b5b73cb59a 100644 --- a/python/api/scheduler_task_update.py +++ b/python/api/scheduler_task_update.py @@ -48,6 +48,9 @@ async def process(self, input: Input, request: Request) -> Output: if "attachments" in input: update_params["attachments"] = input.get("attachments", []) + if "project_name" in input or "project_color" in input: + return {"error": "Project changes are not allowed"} + # Update schedule if this is a scheduled task and schedule is provided if isinstance(task, ScheduledTask) and "schedule" in input: schedule_data = input.get("schedule", {}) @@ -85,5 +88,6 @@ async def process(self, input: Input, request: Request) -> Output: task_dict = serialize_task(updated_task) return { + "ok": True, "task": task_dict } diff --git a/python/api/scheduler_tasks_list.py b/python/api/scheduler_tasks_list.py index 30a8c3f068..8d07235d23 100644 --- a/python/api/scheduler_tasks_list.py +++ b/python/api/scheduler_tasks_list.py @@ -22,8 +22,8 @@ async def process(self, input: Input, request: Request) -> Output: # Use the scheduler's convenience method for task serialization tasks_list = scheduler.serialize_all_tasks() - return {"tasks": tasks_list} + return {"ok": True, "tasks": tasks_list} except Exception as e: PrintStyle.error(f"Failed to list tasks: {str(e)} {traceback.format_exc()}") - return {"error": f"Failed to list tasks: {str(e)} {traceback.format_exc()}", "tasks": []} + return {"ok": False, "error": f"Failed to list tasks: 
{str(e)} {traceback.format_exc()}", "tasks": []} diff --git a/python/api/settings_get.py b/python/api/settings_get.py index 5b5bf95c7e..5285b4fd4b 100644 --- a/python/api/settings_get.py +++ b/python/api/settings_get.py @@ -4,8 +4,9 @@ class GetSettings(ApiHandler): async def process(self, input: dict, request: Request) -> dict | Response: - set = settings.convert_out(settings.get_settings()) - return {"settings": set} + backend = settings.get_settings() + out = settings.convert_out(backend) + return dict(out) @classmethod def get_methods(cls) -> list[str]: diff --git a/python/api/settings_refresh_models.py b/python/api/settings_refresh_models.py new file mode 100644 index 0000000000..dbccb5c229 --- /dev/null +++ b/python/api/settings_refresh_models.py @@ -0,0 +1,73 @@ +from typing import Any + +import models as models_module +from python.helpers.api import ApiHandler, Request, Response +from python.helpers.model_discovery import ( + get_models_for_provider, + clear_cache, +) + +# Placeholder used for masked API keys in UI +API_KEY_PLACEHOLDER = "************" + + +class RefreshModels(ApiHandler): + """ + API endpoint to dynamically fetch model options from provider APIs. + + Called when: + - User changes the provider dropdown + - User enters/updates an API key + - User explicitly requests a refresh + + Input: + model_type: "chat" or "embedding" + provider: Provider ID (e.g., "openai", "anthropic", "openrouter") + api_keys: Dictionary of API keys (may contain placeholders) + api_base: Optional custom API base URL for OpenAI-compatible providers + force_refresh: Optional, if True bypasses cache + clear_cache: Optional, if True clears all cache first + + Returns: + models: List of {value, label} options fetched from the provider's API + """ + + async def process( + self, input: dict[Any, Any], request: Request + ) -> dict[Any, Any] | Response: + model_type = input.get("model_type", "chat") + provider = input.get("provider", "") + api_keys_input = input.get("api_keys", {}) + api_base = input.get("api_base", "") + force_refresh = input.get("force_refresh", False) + should_clear_cache = input.get("clear_cache", False) + + # Handle cache clear request + if should_clear_cache: + clear_cache() + + if not provider: + return {"models": [{"value": "__custom__", "label": "Custom (enter manually)"}]} + + # Resolve actual API keys from environment when placeholders are passed + api_keys = {} + for prov, key in api_keys_input.items(): + if key == API_KEY_PLACEHOLDER or not key: + # Get actual key from environment + actual_key = models_module.get_api_key(prov) + if actual_key and actual_key != "None": + api_keys[prov] = actual_key + else: + # Use the provided key (user may have just entered a new one) + api_keys[prov] = key + + # Fetch models dynamically from provider API + models = await get_models_for_provider( + model_type=model_type, + provider=provider, + api_keys=api_keys, + api_base=api_base if api_base else None, + force_refresh=force_refresh, + ) + + return {"models": models} diff --git a/python/api/settings_set.py b/python/api/settings_set.py index c24a3cc66d..3213eada74 100644 --- a/python/api/settings_set.py +++ b/python/api/settings_set.py @@ -7,6 +7,8 @@ class SetSettings(ApiHandler): async def process(self, input: dict[Any, Any], request: Request) -> dict[Any, Any] | Response: - set = settings.convert_in(input) - set = settings.set_settings(set) - return {"settings": set} + frontend = input.get("settings", input) + backend = settings.convert_in(settings.Settings(**frontend)) + backend = 
settings.set_settings(backend) + out = settings.convert_out(backend) + return dict(out) diff --git a/python/api/subagents.py b/python/api/subagents.py new file mode 100644 index 0000000000..6f501ac76b --- /dev/null +++ b/python/api/subagents.py @@ -0,0 +1,58 @@ +from python.helpers.api import ApiHandler, Input, Output, Request, Response +from python.helpers import subagents +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from python.helpers import projects + +class Subagents(ApiHandler): + async def process(self, input: Input, request: Request) -> Output: + action = input.get("action", "") + ctxid = input.get("context_id", None) + + if ctxid: + _context = self.use_context(ctxid) + + try: + if action == "list": + data = self.get_subagents_list() + elif action == "load": + data = self.load_agent(input.get("name", None)) + elif action == "save": + data = self.save_agent(input.get("name", None), input.get("data", None)) + elif action == "delete": + data = self.delete_agent(input.get("name", None)) + else: + raise Exception("Invalid action") + + return { + "ok": True, + "data": data, + } + except Exception as e: + return { + "ok": False, + "error": str(e), + } + + def get_subagents_list(self): + return subagents.get_agents_list() + + def load_agent(self, name: str|None): + if name is None: + raise Exception("Subagent name is required") + return subagents.load_agent_data(name) + + def save_agent(self, name:str|None, data: dict|None): + if name is None: + raise Exception("Subagent name is required") + if data is None: + raise Exception("Subagent data is required") + subagent = subagents.SubAgent(**data) + subagents.save_agent_data(name, subagent) + return subagents.load_agent_data(name) + + def delete_agent(self, name: str|None): + if name is None: + raise Exception("Subagent name is required") + subagents.delete_agent_data(name) \ No newline at end of file diff --git a/python/api/synthesize.py b/python/api/synthesize.py index 071af8760f..87f93eeaa2 100644 --- a/python/api/synthesize.py +++ b/python/api/synthesize.py @@ -7,9 +7,11 @@ class Synthesize(ApiHandler): async def process(self, input: dict, request: Request) -> dict | Response: text = input.get("text", "") - # ctxid = input.get("ctxid", "") + ctxid = input.get("ctxid", "") - # context = self.get_context(ctxid) + if ctxid: + context = self.use_context(ctxid) + # if not await kokoro_tts.is_downloaded(): # context.log.log(type="info", content="Kokoro TTS model is currently being initialized, please wait...") diff --git a/python/api/transcribe.py b/python/api/transcribe.py index 8276a70a66..5abb9a0834 100644 --- a/python/api/transcribe.py +++ b/python/api/transcribe.py @@ -5,9 +5,11 @@ class Transcribe(ApiHandler): async def process(self, input: dict, request: Request) -> dict | Response: audio = input.get("audio") - # ctxid = input.get("ctxid", "") + ctxid = input.get("ctxid", "") + + if ctxid: + context = self.use_context(ctxid) - # context = self.get_context(ctxid) # if not await whisper.is_downloaded(): # context.log.log(type="info", content="Whisper STT model is currently being initialized, please wait...") diff --git a/python/api/tunnel.py b/python/api/tunnel.py index ca5d64f42d..edcf8ab55a 100644 --- a/python/api/tunnel.py +++ b/python/api/tunnel.py @@ -4,48 +4,62 @@ class Tunnel(ApiHandler): async def process(self, input: dict, request: Request) -> dict | Response: - action = input.get("action", "get") - - tunnel_manager = TunnelManager.get_instance() + return await process(input) - if action == "health": - return 
{"success": True} - - if action == "create": - port = runtime.get_web_ui_port() - provider = input.get("provider", "serveo") # Default to serveo - tunnel_url = tunnel_manager.start_tunnel(port, provider) - if tunnel_url is None: - # Add a little delay and check again - tunnel might be starting - import time - time.sleep(2) - tunnel_url = tunnel_manager.get_tunnel_url() - - return { - "success": tunnel_url is not None, - "tunnel_url": tunnel_url, - "message": "Tunnel creation in progress" if tunnel_url is None else "Tunnel created successfully" - } - - elif action == "stop": - return self.stop() - - elif action == "get": - tunnel_url = tunnel_manager.get_tunnel_url() +async def process(input: dict) -> dict | Response: + action = input.get("action", "get") + + tunnel_manager = TunnelManager.get_instance() + + if action == "health": + return {"success": True} + + if action == "create": + port = runtime.get_web_ui_port() + provider = input.get("provider", "serveo") # Default to serveo + tunnel_url = tunnel_manager.start_tunnel(port, provider) + error = tunnel_manager.get_last_error() + if error: return { - "success": tunnel_url is not None, - "tunnel_url": tunnel_url, - "is_running": tunnel_manager.is_running + "success": False, + "tunnel_url": None, + "message": error, + "notifications": tunnel_manager.get_notifications() } return { - "success": False, - "error": "Invalid action. Use 'create', 'stop', or 'get'." - } - - def stop(self): - tunnel_manager = TunnelManager.get_instance() - tunnel_manager.stop_tunnel() + "success": tunnel_url is not None, + "tunnel_url": tunnel_url, + "notifications": tunnel_manager.get_notifications() + } + + elif action == "stop": + return stop() + + elif action == "get": + tunnel_url = tunnel_manager.get_tunnel_url() return { - "success": True + "success": tunnel_url is not None, + "tunnel_url": tunnel_url, + "is_running": tunnel_manager.is_running } + + elif action == "notifications": + return { + "success": True, + "notifications": tunnel_manager.get_notifications(), + "tunnel_url": tunnel_manager.get_tunnel_url(), + "is_running": tunnel_manager.is_running + } + + return { + "success": False, + "error": "Invalid action. Use 'create', 'stop', 'get', or 'notifications'." 
+ } + +def stop(): + tunnel_manager = TunnelManager.get_instance() + tunnel_manager.stop_tunnel() + return { + "success": True + } diff --git a/python/api/tunnel_proxy.py b/python/api/tunnel_proxy.py index c8b1bd75b1..4df17893a6 100644 --- a/python/api/tunnel_proxy.py +++ b/python/api/tunnel_proxy.py @@ -6,30 +6,33 @@ class TunnelProxy(ApiHandler): async def process(self, input: dict, request: Request) -> dict | Response: - # Get configuration from environment - tunnel_api_port = ( - runtime.get_arg("tunnel_api_port") - or int(dotenv.get_dotenv_value("TUNNEL_API_PORT", 0)) - or 55520 - ) + return await process(input) - # first verify the service is running: +async def process(input: dict) -> dict | Response: + # Get configuration from environment + tunnel_api_port = ( + runtime.get_arg("tunnel_api_port") + or int(dotenv.get_dotenv_value("TUNNEL_API_PORT", 0)) + or 55520 + ) + + # first verify the service is running: + service_ok = False + try: + response = requests.post(f"http://localhost:{tunnel_api_port}/", json={"action": "health"}) + if response.status_code == 200: + service_ok = True + except Exception as e: service_ok = False + + # forward this request to the tunnel service if OK + if service_ok: try: - response = requests.post(f"http://localhost:{tunnel_api_port}/", json={"action": "health"}) - if response.status_code == 200: - service_ok = True + response = requests.post(f"http://localhost:{tunnel_api_port}/", json=input) + return response.json() except Exception as e: - service_ok = False - - # forward this request to the tunnel service if OK - if service_ok: - try: - response = requests.post(f"http://localhost:{tunnel_api_port}/", json=input) - return response.json() - except Exception as e: - return {"error": str(e)} - else: - # forward to API handler directly - from python.api.tunnel import Tunnel - return await Tunnel(self.app, self.thread_lock).process(input, request) + return {"error": str(e)} + else: + # forward to API handler directly + from python.api.tunnel import process as local_process + return await local_process(input) diff --git a/python/extensions/agent_init/_10_initial_message.py b/python/extensions/agent_init/_10_initial_message.py index f64a3fce44..65b5010fdb 100644 --- a/python/extensions/agent_init/_10_initial_message.py +++ b/python/extensions/agent_init/_10_initial_message.py @@ -35,7 +35,6 @@ async def execute(self, **kwargs): # Add to log (green bubble) for immediate UI display self.agent.context.log.log( type="response", - heading=f"{self.agent.agent_name}: Welcome", content=initial_message_text, finished=True, update_progress="none", diff --git a/python/extensions/agent_init/_15_load_profile_settings.py b/python/extensions/agent_init/_15_load_profile_settings.py new file mode 100644 index 0000000000..d4c9b5ab42 --- /dev/null +++ b/python/extensions/agent_init/_15_load_profile_settings.py @@ -0,0 +1,53 @@ +from initialize import initialize_agent +from python.helpers import dirty_json, files, subagents, projects +from python.helpers.extension import Extension + + +class LoadProfileSettings(Extension): + + async def execute(self, **kwargs) -> None: + + if not self.agent or not self.agent.config.profile: + return + + config_files = subagents.get_paths(self.agent, "settings.json", include_default=False) + + settings_override = {} + for settings_path in config_files: + if files.exists(settings_path): + try: + override_settings_str = files.read_file(settings_path) + override_settings = dirty_json.try_parse(override_settings_str) + if 
isinstance(override_settings, dict): + settings_override.update(override_settings) + else: + raise Exception( + f"Subordinate settings in {settings_path} must be a JSON object." + ) + except Exception as e: + self.agent.context.log.log( + type="error", + content=( + f"Error loading subordinate settings from {settings_path} for " + f"profile '{self.agent.config.profile}': {e}" + ), + ) + + if settings_override: + # Preserve the original memory_subdir unless it's explicitly overridden + current_memory_subdir = self.agent.config.memory_subdir + new_config = initialize_agent(override_settings=settings_override) + if ( + "agent_memory_subdir" not in settings_override + and current_memory_subdir != "default" + ): + new_config.memory_subdir = current_memory_subdir + self.agent.config = new_config + # self.agent.context.log.log( + # type="info", + # content=( + # "Loaded custom settings for agent " + # f"{self.agent.number} with profile '{self.agent.config.profile}'." + # ), + # ) + diff --git a/python/extensions/banners/_10_unsecured_connection.py b/python/extensions/banners/_10_unsecured_connection.py new file mode 100644 index 0000000000..89f5dcb6c5 --- /dev/null +++ b/python/extensions/banners/_10_unsecured_connection.py @@ -0,0 +1,63 @@ +from python.helpers.extension import Extension +from python.helpers import dotenv +import re + + +class UnsecuredConnectionCheck(Extension): + """Check: non-local without credentials, or credentials over non-HTTPS.""" + + async def execute(self, banners: list = [], frontend_context: dict = {}, **kwargs): + hostname = frontend_context.get("hostname", "") + protocol = frontend_context.get("protocol", "") + + auth_login = dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN, "") + auth_password = dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD, "") + has_credentials = bool(auth_login and auth_login.strip() and auth_password and auth_password.strip()) + + is_local = self._is_localhost(hostname) + is_https = protocol == "https:" + + if not is_local and not has_credentials: + banners.append({ + "id": "unsecured-connection", + "type": "warning", + "priority": 80, + "title": "Unsecured Connection", + "html": """You are accessing Agent Zero from a non-local address without authentication. + + Configure credentials in Settings β†’ External Services β†’ Authentication.""", + "dismissible": True, + "source": "backend" + }) + + if has_credentials and not is_local and not is_https: + banners.append({ + "id": "credentials-unencrypted", + "type": "warning", + "priority": 90, + "title": "Credentials May Be Sent Unencrypted", + "html": """Your connection is not using HTTPS. Login credentials may be transmitted in plain text. 
+ Consider using HTTPS or a secure tunnel.""", + "dismissible": True, + "source": "backend" + }) + + def _is_localhost(self, hostname: str) -> bool: + local_patterns = ["localhost", "127.0.0.1", "::1", "0.0.0.0"] + + if hostname in local_patterns: + return True + + # RFC1918 private ranges + if re.match(r"^192\.168\.\d{1,3}\.\d{1,3}$", hostname): + return True + if re.match(r"^10\.\d{1,3}\.\d{1,3}\.\d{1,3}$", hostname): + return True + if re.match(r"^172\.(1[6-9]|2\d|3[01])\.\d{1,3}\.\d{1,3}$", hostname): + return True + + # .local domains + if hostname.endswith(".local"): + return True + + return False diff --git a/python/extensions/banners/_20_missing_api_key.py b/python/extensions/banners/_20_missing_api_key.py new file mode 100644 index 0000000000..9d31a5b509 --- /dev/null +++ b/python/extensions/banners/_20_missing_api_key.py @@ -0,0 +1,64 @@ +from python.helpers.extension import Extension +from python.helpers import settings as settings_helper +import models + + +class MissingApiKeyCheck(Extension): + """Check if API keys are configured for selected model providers.""" + + LOCAL_PROVIDERS = ["ollama", "lm_studio"] + LOCAL_EMBEDDING = ["huggingface"] + MODEL_TYPE_NAMES = { + "chat": "Chat Model", + "utility": "Utility Model", + "browser": "Web Browser Model", + "embedding": "Embedding Model", + } + + async def execute(self, banners: list = [], frontend_context: dict = {}, **kwargs): + current_settings = settings_helper.get_settings() + model_providers = { + "chat": current_settings.get("chat_model_provider", ""), + "utility": current_settings.get("util_model_provider", ""), + "browser": current_settings.get("browser_model_provider", ""), + "embedding": current_settings.get("embed_model_provider", ""), + } + + missing_providers = [] + + for model_type, provider in model_providers.items(): + if not provider: + continue + + provider_lower = provider.lower() + if provider_lower in self.LOCAL_PROVIDERS: + continue + if model_type == "embedding" and provider_lower in self.LOCAL_EMBEDDING: + continue + + api_key = models.get_api_key(provider_lower) + if not (api_key and api_key.strip() and api_key != "None"): + missing_providers.append({ + "model_type": self.MODEL_TYPE_NAMES.get(model_type, model_type), + "provider": provider, + }) + + if not missing_providers: + return + + model_list = ", ".join( + f"{p['model_type']} ({p['provider']})" for p in missing_providers + ) + + banners.append({ + "id": "missing-api-key", + "type": "error", + "priority": 100, + "title": "Missing API Key", + "html": f"""No API key configured for: {model_list}. + Agent Zero will not be able to function properly. + + Add your API key in Settings β†’ External Services β†’ API Keys.""", + "dismissible": False, + "source": "backend" + }) diff --git a/python/extensions/before_main_llm_call/_10_log_for_stream.py b/python/extensions/before_main_llm_call/_10_log_for_stream.py index 49b04b7b7b..6618a0a47f 100644 --- a/python/extensions/before_main_llm_call/_10_log_for_stream.py +++ b/python/extensions/before_main_llm_call/_10_log_for_stream.py @@ -19,8 +19,10 @@ async def execute(self, loop_data: LoopData = LoopData(), text: str = "", **kwar ) ) -def build_heading(agent, text: str): - return f"icon://network_intelligence {agent.agent_name}: {text}" +def build_heading(agent, text: str, icon: str = "network_intelligence"): + # Include agent identifier for all agents (A0:, A1:, A2:, etc.) 
+ agent_prefix = f"{agent.agent_name}: " + return f"icon://{icon} {agent_prefix}{text}" def build_default_heading(agent): return build_heading(agent, "Generating...") \ No newline at end of file diff --git a/python/extensions/error_format/_10_mask_errors.py b/python/extensions/error_format/_10_mask_errors.py index e685fdbbc5..f90cf77730 100644 --- a/python/extensions/error_format/_10_mask_errors.py +++ b/python/extensions/error_format/_10_mask_errors.py @@ -1,5 +1,5 @@ from python.helpers.extension import Extension -from python.helpers.secrets import SecretsManager +from python.helpers.secrets import get_secrets_manager class MaskErrorSecrets(Extension): @@ -10,7 +10,7 @@ async def execute(self, **kwargs): if not msg: return - secrets_mgr = SecretsManager.get_instance() + secrets_mgr = get_secrets_manager(self.agent.context) # Mask the error message if "message" in msg: diff --git a/python/extensions/hist_add_before/_10_mask_content.py b/python/extensions/hist_add_before/_10_mask_content.py index 279c81b146..a59006e62f 100644 --- a/python/extensions/hist_add_before/_10_mask_content.py +++ b/python/extensions/hist_add_before/_10_mask_content.py @@ -1,4 +1,5 @@ from python.helpers.extension import Extension +from python.helpers.secrets import get_secrets_manager class MaskHistoryContent(Extension): @@ -10,8 +11,7 @@ async def execute(self, **kwargs): return try: - from python.helpers.secrets import SecretsManager - secrets_mgr = SecretsManager.get_instance() + secrets_mgr = get_secrets_manager(self.agent.context) # Mask the content before adding to history content_data["content"] = self._mask_content(content_data["content"], secrets_mgr) diff --git a/python/extensions/message_loop_prompts_after/_75_include_project_extras.py b/python/extensions/message_loop_prompts_after/_75_include_project_extras.py new file mode 100644 index 0000000000..87bef95be6 --- /dev/null +++ b/python/extensions/message_loop_prompts_after/_75_include_project_extras.py @@ -0,0 +1,47 @@ +from python.helpers.extension import Extension +from agent import LoopData +from python.helpers import projects + + +class IncludeProjectExtras(Extension): + async def execute(self, loop_data: LoopData = LoopData(), **kwargs): + + # active project + project_name = projects.get_context_project_name(self.agent.context) + if not project_name: + return + + # project config + project = projects.load_basic_project_data(project_name) + + # load file structure if enabled + if project["file_structure"]["enabled"]: + file_structure = projects.get_file_structure(project_name) + gitignore = cleanup_gitignore(project["file_structure"]["gitignore"]) + + # read prompt + file_structure_prompt = self.agent.read_prompt( + "agent.extras.project.file_structure.md", + max_depth=project["file_structure"]["max_depth"], + gitignore=gitignore, + project_name=project_name, + file_structure=file_structure, + ) + # add file structure to the prompt + loop_data.extras_temporary["project_file_structure"] = file_structure_prompt + + +def cleanup_gitignore(gitignore_raw: str) -> str: + """Process gitignore: split lines, strip, remove comments, remove empty lines.""" + gitignore_lines = [] + for line in gitignore_raw.split('\n'): + # Strip whitespace + line = line.strip() + # Remove inline comments (everything after #) + if '#' in line: + line = line.split('#')[0].strip() + # Keep only non-empty lines + if line: + gitignore_lines.append(line) + + return '\n'.join(gitignore_lines) if gitignore_lines else "nothing ignored" diff --git 
a/python/extensions/reasoning_stream_chunk/_10_mask_stream.py b/python/extensions/reasoning_stream_chunk/_10_mask_stream.py index aef15624a5..07459e64cd 100644 --- a/python/extensions/reasoning_stream_chunk/_10_mask_stream.py +++ b/python/extensions/reasoning_stream_chunk/_10_mask_stream.py @@ -1,4 +1,5 @@ from python.helpers.extension import Extension +from python.helpers.secrets import get_secrets_manager class MaskReasoningStreamChunk(Extension): @@ -10,8 +11,7 @@ async def execute(self, **kwargs): return try: - from python.helpers.secrets import SecretsManager - secrets_mgr = SecretsManager.get_instance() + secrets_mgr = get_secrets_manager(self.agent.context) # Initialize filter if not exists filter_key = "_reason_stream_filter" diff --git a/python/extensions/response_stream/_10_log_from_stream.py b/python/extensions/response_stream/_10_log_from_stream.py index ace6baf547..375b96c032 100644 --- a/python/extensions/response_stream/_10_log_from_stream.py +++ b/python/extensions/response_stream/_10_log_from_stream.py @@ -22,12 +22,9 @@ async def execute( if "headline" in parsed: heading = build_heading(self.agent, parsed['headline']) elif "tool_name" in parsed: - heading = build_heading(self.agent, f"Using tool {parsed['tool_name']}") # if the llm skipped headline + heading = build_heading(self.agent, f"Using {parsed['tool_name']}") # if the llm skipped headline elif "thoughts" in parsed: - # thought length indicator - thoughts = "\n".join(parsed["thoughts"]) - pipes = "|" * math.ceil(math.sqrt(len(thoughts))) - heading = build_heading(self.agent, f"Thinking... {pipes}") + heading = build_default_heading(self.agent) # create log message and store it in loop data temporary params if "log_item_generating" not in loop_data.params_temporary: diff --git a/python/extensions/response_stream_chunk/_10_mask_stream.py b/python/extensions/response_stream_chunk/_10_mask_stream.py index fb52c174a7..fe7eb3dd76 100644 --- a/python/extensions/response_stream_chunk/_10_mask_stream.py +++ b/python/extensions/response_stream_chunk/_10_mask_stream.py @@ -1,6 +1,6 @@ from python.helpers.extension import Extension -from python.helpers.secrets import SecretsManager from agent import Agent, LoopData +from python.helpers.secrets import get_secrets_manager class MaskResponseStreamChunk(Extension): @@ -13,8 +13,7 @@ async def execute(self, **kwargs): return try: - from python.helpers.secrets import SecretsManager - secrets_mgr = SecretsManager.get_instance() + secrets_mgr = get_secrets_manager(self.agent.context) # Initialize filter if not exists filter_key = "_resp_stream_filter" diff --git a/python/extensions/system_prompt/_10_system_prompt.py b/python/extensions/system_prompt/_10_system_prompt.py index eb0089779e..9a17c0ad9a 100644 --- a/python/extensions/system_prompt/_10_system_prompt.py +++ b/python/extensions/system_prompt/_10_system_prompt.py @@ -3,16 +3,23 @@ from python.helpers.mcp_handler import MCPConfig from agent import Agent, LoopData from python.helpers.settings import get_settings +from python.helpers import projects class SystemPrompt(Extension): - async def execute(self, system_prompt: list[str] = [], loop_data: LoopData = LoopData(), **kwargs: Any): + async def execute( + self, + system_prompt: list[str] = [], + loop_data: LoopData = LoopData(), + **kwargs: Any + ): # append main system prompt and tools main = get_main_prompt(self.agent) tools = get_tools_prompt(self.agent) mcp_tools = get_mcp_tools_prompt(self.agent) secrets_prompt = get_secrets_prompt(self.agent) + project_prompt = 
get_project_prompt(self.agent) system_prompt.append(main) system_prompt.append(tools) @@ -20,6 +27,8 @@ async def execute(self, system_prompt: list[str] = [], loop_data: LoopData = Loo system_prompt.append(mcp_tools) if secrets_prompt: system_prompt.append(secrets_prompt) + if project_prompt: + system_prompt.append(project_prompt) def get_main_prompt(agent: Agent): @@ -29,7 +38,7 @@ def get_main_prompt(agent: Agent): def get_tools_prompt(agent: Agent): prompt = agent.read_prompt("agent.system.tools.md") if agent.config.chat_model.vision: - prompt += '\n\n' + agent.read_prompt("agent.system.tools_vision.md") + prompt += "\n\n" + agent.read_prompt("agent.system.tools_vision.md") return prompt @@ -37,7 +46,9 @@ def get_mcp_tools_prompt(agent: Agent): mcp_config = MCPConfig.get_instance() if mcp_config.servers: pre_progress = agent.context.log.progress - agent.context.log.set_progress("Collecting MCP tools") # MCP might be initializing, better inform via progress bar + agent.context.log.set_progress( + "Collecting MCP tools" + ) # MCP might be initializing, better inform via progress bar tools = MCPConfig.get_instance().get_tools_prompt() agent.context.log.set_progress(pre_progress) # return original progress return tools @@ -47,11 +58,25 @@ def get_mcp_tools_prompt(agent: Agent): def get_secrets_prompt(agent: Agent): try: # Use lazy import to avoid circular dependencies - from python.helpers.secrets import SecretsManager - secrets_manager = SecretsManager.get_instance() + from python.helpers.secrets import get_secrets_manager + + secrets_manager = get_secrets_manager(agent.context) secrets = secrets_manager.get_secrets_for_prompt() vars = get_settings()["variables"] return agent.read_prompt("agent.system.secrets.md", secrets=secrets, vars=vars) except Exception as e: # If secrets module is not available or has issues, return empty string return "" + + +def get_project_prompt(agent: Agent): + result = agent.read_prompt("agent.system.projects.main.md") + project_name = agent.context.get_data(projects.CONTEXT_DATA_KEY_PROJECT) + if project_name: + project_vars = projects.build_system_prompt_vars(project_name) + result += "\n\n" + agent.read_prompt( + "agent.system.projects.active.md", **project_vars + ) + else: + result += "\n\n" + agent.read_prompt("agent.system.projects.inactive.md") + return result diff --git a/python/extensions/system_prompt/_20_behaviour_prompt.py b/python/extensions/system_prompt/_20_behaviour_prompt.py index 5c451eba74..1650e9ee8c 100644 --- a/python/extensions/system_prompt/_20_behaviour_prompt.py +++ b/python/extensions/system_prompt/_20_behaviour_prompt.py @@ -11,7 +11,7 @@ async def execute(self, system_prompt: list[str]=[], loop_data: LoopData = LoopD system_prompt.insert(0, prompt) #.append(prompt) def get_custom_rules_file(agent: Agent): - return memory.get_memory_subdir_abs(agent) + f"/behaviour.md" + return files.get_abs_path(memory.get_memory_subdir_abs(agent), "behaviour.md") def read_rules(agent: Agent): rules_file = get_custom_rules_file(agent) diff --git a/python/extensions/tool_execute_after/_10_mask_secrets.py b/python/extensions/tool_execute_after/_10_mask_secrets.py index 1055364c4b..ae2cdc4efa 100644 --- a/python/extensions/tool_execute_after/_10_mask_secrets.py +++ b/python/extensions/tool_execute_after/_10_mask_secrets.py @@ -1,5 +1,5 @@ from python.helpers.extension import Extension -from python.helpers.secrets import SecretsManager +from python.helpers.secrets import get_secrets_manager from python.helpers.tool import Response @@ -8,5 +8,5 @@ class 
MaskToolSecrets(Extension): async def execute(self, response: Response | None = None, **kwargs): if not response: return - secrets_mgr = SecretsManager.get_instance() + secrets_mgr = get_secrets_manager(self.agent.context) response.message = secrets_mgr.mask_values(response.message) diff --git a/python/extensions/tool_execute_before/_10_unmask_secrets.py b/python/extensions/tool_execute_before/_10_unmask_secrets.py index 347d67d1b4..9025812291 100644 --- a/python/extensions/tool_execute_before/_10_unmask_secrets.py +++ b/python/extensions/tool_execute_before/_10_unmask_secrets.py @@ -1,5 +1,5 @@ from python.helpers.extension import Extension -from python.helpers.secrets import SecretsManager +from python.helpers.secrets import get_secrets_manager class UnmaskToolSecrets(Extension): @@ -10,7 +10,7 @@ async def execute(self, **kwargs): if not tool_args: return - secrets_mgr = SecretsManager.get_instance() + secrets_mgr = get_secrets_manager(self.agent.context) # Unmask placeholders in args for actual tool execution for k, v in tool_args.items(): diff --git a/python/extensions/user_message_ui/_10_update_check.py b/python/extensions/user_message_ui/_10_update_check.py new file mode 100644 index 0000000000..ad967f44d2 --- /dev/null +++ b/python/extensions/user_message_ui/_10_update_check.py @@ -0,0 +1,58 @@ +from python.helpers import notification +from python.helpers.extension import Extension +from agent import LoopData +from python.helpers import settings, update_check +import datetime + + +# check for newer versions of A0 available and send notification +# check after user message is sent from UI, not API, MCP etc. (user is active and can see the notification) +# do not check too often, use cooldown +# do not notify too often unless there's a different notification + +last_check = datetime.datetime.fromtimestamp(0) +check_cooldown_seconds = 60 +last_notification_id = "" +last_notification_time = datetime.datetime.fromtimestamp(0) +notification_cooldown_seconds = 60 * 60 * 24 + +class UpdateCheck(Extension): + + async def execute(self, loop_data: LoopData = LoopData(), text: str = "", **kwargs): + try: + global last_check, last_notification_id, last_notification_time + + # first check if update check is enabled + current_settings = settings.get_settings() + if not current_settings["update_check_enabled"]: + return + + # check if cooldown has passed + if (datetime.datetime.now() - last_check).total_seconds() < check_cooldown_seconds: + return + last_check = datetime.datetime.now() + + # check for updates + version = await update_check.check_version() + + # if the user should update, send notification + if notif := version.get("notification"): + if notif.get("id") != last_notification_id or (datetime.datetime.now() - last_notification_time).total_seconds() > notification_cooldown_seconds: + last_notification_id = notif.get("id") + last_notification_time = datetime.datetime.now() + self.send_notification(notif) + except Exception as e: + pass # no need to log if the update server is inaccessible + + + def send_notification(self, notif): + notifs = self.agent.context.get_notification_manager() + notifs.send_notification( + title=notif.get("title", "Newer version available"), + message=notif.get("message", "A newer version of Agent Zero is available. 
Please update to the latest version."), + type=notif.get("type", "info"), + detail=notif.get("detail", ""), + display_time=notif.get("display_time", 10), + group=notif.get("group", "update_check"), + priority=notif.get("priority", notification.NotificationPriority.NORMAL), + ) diff --git a/python/extensions/util_model_call_before/_10_mask_secrets.py b/python/extensions/util_model_call_before/_10_mask_secrets.py index beeffe950f..df23ff1f66 100644 --- a/python/extensions/util_model_call_before/_10_mask_secrets.py +++ b/python/extensions/util_model_call_before/_10_mask_secrets.py @@ -1,5 +1,5 @@ from python.helpers.extension import Extension -from python.helpers.secrets import SecretsManager +from python.helpers.secrets import get_secrets_manager class MaskToolSecrets(Extension): @@ -8,7 +8,7 @@ async def execute(self, **kwargs): # model call data call_data:dict = kwargs.get("call_data", {}) - secrets_mgr = SecretsManager.get_instance() + secrets_mgr = get_secrets_manager(self.agent.context) # mask system and user message if system:=call_data.get("system"): diff --git a/python/helpers/api.py b/python/helpers/api.py index 5d0e171f58..6c90c6e566 100644 --- a/python/helpers/api.py +++ b/python/helpers/api.py @@ -57,7 +57,9 @@ async def handle_request(self, request: Request) -> Response: PrintStyle().print(f"Error parsing JSON: {str(e)}") input_data = {} else: - input_data = {"data": request.get_data(as_text=True)} + # input_data = {"data": request.get_data(as_text=True)} + input_data = {} + # process via handler output = await self.process(input_data, request) @@ -78,14 +80,21 @@ async def handle_request(self, request: Request) -> Response: return Response(response=error, status=500, mimetype="text/plain") # get context to run agent zero in - def get_context(self, ctxid: str): + def use_context(self, ctxid: str, create_if_not_exists: bool = True): with self.thread_lock: if not ctxid: first = AgentContext.first() if first: + AgentContext.use(first.id) return first - return AgentContext(config=initialize_agent()) - got = AgentContext.get(ctxid) + context = AgentContext(config=initialize_agent(), set_current=True) + return context + got = AgentContext.use(ctxid) if got: return got - return AgentContext(config=initialize_agent(), id=ctxid) + if create_if_not_exists: + context = AgentContext(config=initialize_agent(), id=ctxid, set_current=True) + return context + else: + raise Exception(f"Context {ctxid} not found") + diff --git a/python/helpers/backup.py b/python/helpers/backup.py index 81f78fe5eb..4e4873371d 100644 --- a/python/helpers/backup.py +++ b/python/helpers/backup.py @@ -75,9 +75,14 @@ def _get_default_patterns(self) -> str: # Configuration and Settings (CRITICAL) {agent_root}/.env {agent_root}/tmp/settings.json +{agent_root}/tmp/secrets.env {agent_root}/tmp/chats/** {agent_root}/tmp/scheduler/** -{agent_root}/tmp/uploads/**""" +{agent_root}/tmp/uploads/** + +# User data +{agent_root}/usr/** +""" def _get_agent_zero_version(self) -> str: """Get current Agent Zero version""" diff --git a/python/helpers/context.py b/python/helpers/context.py new file mode 100644 index 0000000000..2dc5609f0e --- /dev/null +++ b/python/helpers/context.py @@ -0,0 +1,46 @@ +from contextvars import ContextVar +from typing import Any, TypeVar, cast, Optional, Dict + +T = TypeVar("T") + +# no mutable default β€” None is safe +_context_data: ContextVar[Optional[Dict[str, Any]]] = ContextVar("_context_data", default=None) + + +def _ensure_context() -> Dict[str, Any]: + """Make sure a context dict exists, and return 
it.""" + data = _context_data.get() + if data is None: + data = {} + _context_data.set(data) + return data + + +def set_context_data(key: str, value: Any): + """Set context data for the current async/task context.""" + data = _ensure_context() + if data.get(key) == value: + return + data[key] = value + _context_data.set(data) + + +def delete_context_data(key: str): + """Delete a key from the current async/task context.""" + data = _ensure_context() + if key in data: + del data[key] + _context_data.set(data) + + +def get_context_data(key: Optional[str] = None, default: T = None) -> T: + """Get a key from the current context, or the full dict if key is None.""" + data = _ensure_context() + if key is None: + return cast(T, data) + return cast(T, data.get(key, default)) + + +def clear_context_data(): + """Completely clear the context dict.""" + _context_data.set({}) diff --git a/python/helpers/defer.py b/python/helpers/defer.py index dc96efe5aa..8c2c7e86ef 100644 --- a/python/helpers/defer.py +++ b/python/helpers/defer.py @@ -6,8 +6,9 @@ T = TypeVar("T") + class EventLoopThread: - _instances = {} + _instances: dict[str, "EventLoopThread"] = {} _lock = threading.Lock() def __init__(self, thread_name: str = "Background") -> None: @@ -38,8 +39,29 @@ def _run_event_loop(self): self.loop.run_forever() def terminate(self): - if self.loop and self.loop.is_running(): - self.loop.stop() + loop = getattr(self, "loop", None) + thread = getattr(self, "thread", None) + + if not loop: + return + + if loop.is_running(): + if thread and thread is threading.current_thread(): + loop.stop() + else: + loop.call_soon_threadsafe(loop.stop) + if thread: + thread.join() + elif thread and thread.is_alive() and thread is not threading.current_thread(): + thread.join() + + if not loop.is_closed(): + loop.close() + + with self.__class__._lock: + if self.thread_name in self.__class__._instances: + del self.__class__._instances[self.thread_name] + self.loop = None self.thread = None @@ -79,6 +101,12 @@ def __del__(self): def _start_task(self): self._future = self.event_loop_thread.run_coroutine(self._run()) + if self._future: + self._future.add_done_callback(self._on_task_done) + + def _on_task_done(self, _future: Future): + # Ensure child background tasks are always cleaned up once the parent finishes + self.kill_children() async def _run(self): return await self.func(*self.args, **self.kwargs) @@ -120,30 +148,16 @@ def kill(self, terminate_thread: bool = False) -> None: if self._future and not self._future.done(): self._future.cancel() - if ( - terminate_thread - and self.event_loop_thread.loop - and self.event_loop_thread.loop.is_running() - ): - - def cleanup(): - tasks = [ - t - for t in asyncio.all_tasks(self.event_loop_thread.loop) - if t is not asyncio.current_task(self.event_loop_thread.loop) - ] - for task in tasks: - task.cancel() - try: - # Give tasks a chance to cleanup - if self.event_loop_thread.loop: - self.event_loop_thread.loop.run_until_complete( - asyncio.gather(task, return_exceptions=True) - ) - except Exception: - pass # Ignore cleanup errors - - self.event_loop_thread.loop.call_soon_threadsafe(cleanup) + if terminate_thread and self.event_loop_thread.loop: + if self.event_loop_thread.loop.is_running(): + try: + cleanup_future = asyncio.run_coroutine_threadsafe( + self._drain_event_loop_tasks(), self.event_loop_thread.loop + ) + cleanup_future.result() + except Exception: + pass + self.event_loop_thread.terminate() def kill_children(self) -> None: @@ -196,3 +210,19 @@ async def wrapped(): 
asyncio.run_coroutine_threadsafe(wrapped(), self.event_loop_thread.loop) return asyncio.wrap_future(future) + + @staticmethod + async def _drain_event_loop_tasks(): + """Cancel and await all pending tasks on the current event loop.""" + loop = asyncio.get_running_loop() + current_task = asyncio.current_task(loop=loop) + pending = [ + task + for task in asyncio.all_tasks(loop=loop) + if task is not current_task + ] + if not pending: + return + for task in pending: + task.cancel() + await asyncio.gather(*pending, return_exceptions=True) diff --git a/python/helpers/document_query.py b/python/helpers/document_query.py index d60a86574e..6ba38855c6 100644 --- a/python/helpers/document_query.py +++ b/python/helpers/document_query.py @@ -361,15 +361,22 @@ def __init__( self.progress_callback = progress_callback or (lambda x: None) async def document_qa( - self, document_uri: str, questions: Sequence[str] + self, document_uris: List[str], questions: Sequence[str] ) -> Tuple[bool, str]: - self.progress_callback(f"Starting Q&A process") + self.progress_callback( + f"Starting Q&A process for {len(document_uris)} documents" + ) + await self.agent.handle_intervention() - # index document - _ = await self.document_get_content(document_uri, True) + # index documents + await asyncio.gather( + *[self.document_get_content(uri, True) for uri in document_uris] + ) + await self.agent.handle_intervention() selected_chunks = {} for question in questions: self.progress_callback(f"Optimizing query: {question}") + await self.agent.handle_intervention() human_content = f'Search Query: "{question}"' system_content = self.agent.parse_prompt( "fw.document_query.optmimize_query.md" @@ -381,14 +388,19 @@ async def document_qa( ) ).strip() - self.progress_callback(f"Searching document with query: {optimized_query}") + await self.agent.handle_intervention() + self.progress_callback(f"Searching documents with query: {optimized_query}") + + normalized_uris = [self.store.normalize_uri(uri) for uri in document_uris] + doc_filter = " or ".join( + [f"document_uri == '{uri}'" for uri in normalized_uris] + ) - normalized_uri = self.store.normalize_uri(document_uri) - chunks = await self.store.search_document( - document_uri=normalized_uri, + chunks = await self.store.search_documents( query=optimized_query, limit=100, threshold=DEFAULT_SEARCH_THRESHOLD, + filter=doc_filter, ) self.progress_callback(f"Found {len(chunks)} chunks") @@ -397,13 +409,14 @@ async def document_qa( selected_chunks[chunk.metadata["id"]] = chunk if not selected_chunks: - self.progress_callback(f"No relevant content found in the document") - content = f"!!! No content found for document: {document_uri} matching queries: {json.dumps(questions)}" + self.progress_callback("No relevant content found in the documents") + content = f"!!! 
No content found for documents: {json.dumps(document_uris)} matching queries: {json.dumps(questions)}" return False, content self.progress_callback( f"Processing {len(questions)} questions in context of {len(selected_chunks)} chunks" ) + await self.agent.handle_intervention() questions_str = "\n".join([f" * {question}" for question in questions]) content = "\n\n----\n\n".join( @@ -430,6 +443,7 @@ async def document_get_content( self, document_uri: str, add_to_db: bool = False ) -> str: self.progress_callback(f"Fetching document content") + await self.agent.handle_intervention() url = urlparse(document_uri) scheme = url.scheme or "file" mimetype, encoding = mimetypes.guess_type(document_uri) @@ -455,6 +469,7 @@ async def document_get_content( await asyncio.sleep(1) last_error = str(e) retries += 1 + await self.agent.handle_intervention() if not response: raise ValueError( @@ -492,9 +507,11 @@ async def document_get_content( # Use the store's normalization method document_uri_norm = self.store.normalize_uri(document_uri) + await self.agent.handle_intervention() exists = await self.store.document_exists(document_uri_norm) document_content = "" if not exists: + await self.agent.handle_intervention() if mimetype.startswith("image/"): document_content = self.handle_image_document(document_uri, scheme) elif mimetype == "text/html": @@ -509,6 +526,7 @@ async def document_get_content( ) if add_to_db: self.progress_callback(f"Indexing document") + await self.agent.handle_intervention() success, ids = await self.store.add_document( document_content, document_uri_norm ) @@ -519,6 +537,7 @@ async def document_get_content( ) self.progress_callback(f"Indexed {len(ids)} chunks") else: + await self.agent.handle_intervention() doc = await self.store.get_document(document_uri_norm) if doc: document_content = doc.page_content diff --git a/python/helpers/email_client.py b/python/helpers/email_client.py new file mode 100644 index 0000000000..741e008991 --- /dev/null +++ b/python/helpers/email_client.py @@ -0,0 +1,587 @@ +import asyncio +import email +import os +import re +import uuid +from dataclasses import dataclass +from email.header import decode_header +from email.message import Message as EmailMessage +from fnmatch import fnmatch +from typing import Any, Dict, List, Optional, Tuple + +import html2text +from bs4 import BeautifulSoup +from imapclient import IMAPClient + +from python.helpers import files +from python.helpers.errors import RepairableException, format_error +from python.helpers.print_style import PrintStyle + + +@dataclass +class Message: + """Email message representation with sender, subject, body, and attachments.""" + sender: str + subject: str + body: str + attachments: List[str] + + +class EmailClient: + """ + Async email client for reading messages from IMAP and Exchange servers. + + """ + + def __init__( + self, + account_type: str = "imap", + server: str = "", + port: int = 993, + username: str = "", + password: str = "", + options: Optional[Dict[str, Any]] = None, + ): + """ + Initialize email client with connection parameters. 
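# Hedged construction sketch for the EmailClient above; the server address and
# credentials are placeholders, and the options keys are the ones described in
# the Args section that follows (ssl, timeout).
client = EmailClient(
    account_type="imap",
    server="imap.example.com",
    port=993,
    username="user@example.com",
    password="app-password",
    options={"ssl": True, "timeout": 30},
)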
+ + Args: + account_type: Type of account - "imap" or "exchange" + server: Mail server address (e.g., "imap.gmail.com") + port: Server port (default 993 for IMAP SSL) + username: Email account username + password: Email account password + options: Optional configuration dict with keys: + - ssl: Use SSL/TLS (default: True) + - timeout: Connection timeout in seconds (default: 30) + """ + self.account_type = account_type.lower() + self.server = server + self.port = port + self.username = username + self.password = password + self.options = options or {} + + # Default options + self.ssl = self.options.get("ssl", True) + self.timeout = self.options.get("timeout", 30) + + self.client: Optional[IMAPClient] = None + self.exchange_account = None + + async def connect(self) -> None: + """Establish connection to email server.""" + try: + if self.account_type == "imap": + await self._connect_imap() + elif self.account_type == "exchange": + await self._connect_exchange() + else: + raise RepairableException( + f"Unsupported account type: {self.account_type}. " + "Supported types: 'imap', 'exchange'" + ) + except Exception as e: + err = format_error(e) + PrintStyle.error(f"Failed to connect to email server: {err}") + raise RepairableException(f"Email connection failed: {err}") from e + + async def _connect_imap(self) -> None: + """Establish IMAP connection.""" + loop = asyncio.get_event_loop() + + def _sync_connect(): + client = IMAPClient(self.server, port=self.port, ssl=self.ssl, timeout=self.timeout) + # Increase line length limit to handle large emails (default is 10000) + # This fixes "line too long" errors for emails with large headers or embedded content + client._imap._maxline = 100000 + client.login(self.username, self.password) + return client + + self.client = await loop.run_in_executor(None, _sync_connect) + PrintStyle.standard(f"Connected to IMAP server: {self.server}") + + async def _connect_exchange(self) -> None: + """Establish Exchange connection.""" + try: + from exchangelib import Account, Configuration, Credentials, DELEGATE + + loop = asyncio.get_event_loop() + + def _sync_connect(): + creds = Credentials(username=self.username, password=self.password) + config = Configuration(server=self.server, credentials=creds) + return Account( + primary_smtp_address=self.username, + config=config, + autodiscover=False, + access_type=DELEGATE + ) + + self.exchange_account = await loop.run_in_executor(None, _sync_connect) + PrintStyle.standard(f"Connected to Exchange server: {self.server}") + except ImportError as e: + raise RepairableException( + "exchangelib not installed. Install with: pip install exchangelib>=5.4.3" + ) from e + + async def disconnect(self) -> None: + """Clean up connection.""" + try: + if self.client: + loop = asyncio.get_event_loop() + await loop.run_in_executor(None, self.client.logout) + self.client = None + PrintStyle.standard("Disconnected from IMAP server") + elif self.exchange_account: + self.exchange_account = None + PrintStyle.standard("Disconnected from Exchange server") + except Exception as e: + PrintStyle.error(f"Error during disconnect: {format_error(e)}") + + async def read_messages( + self, + download_folder: str, + filter: Optional[Dict[str, Any]] = None, + ) -> List[Message]: + """ + Read messages based on filter criteria. 
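# Hedged usage sketch (assumes an already constructed `client` as shown above):
# connect, read unread mail matching sender/subject patterns, then disconnect.
await client.connect()
try:
    msgs = await client.read_messages(
        download_folder="tmp/email/inbox",
        filter={"unread": True, "sender": "*@example.com", "subject": "*invoice*"},
    )
    for m in msgs:
        print(m.sender, "|", m.subject, "| attachments:", m.attachments)
finally:
    await client.disconnect()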
+ + Args: + download_folder: Folder to save attachments (relative to /a0/) + filter: Filter criteria dict with keys: + - unread: Boolean to filter unread messages (default: True) + - sender: Sender pattern with wildcards (e.g., "*@company.com") + - subject: Subject pattern with wildcards (e.g., "*invoice*") + - since_date: Optional datetime for date filtering + + Returns: + List of Message objects with attachments saved to download_folder + """ + filter = filter or {} + + if self.account_type == "imap": + return await self._fetch_imap_messages(download_folder, filter) + elif self.account_type == "exchange": + return await self._fetch_exchange_messages(download_folder, filter) + else: + raise RepairableException(f"Unsupported account type: {self.account_type}") + + async def _fetch_imap_messages( + self, + download_folder: str, + filter: Dict[str, Any], + ) -> List[Message]: + """Fetch messages from IMAP server.""" + if not self.client: + raise RepairableException("IMAP client not connected. Call connect() first.") + + loop = asyncio.get_event_loop() + messages: List[Message] = [] + + def _sync_fetch(): + # Select inbox + self.client.select_folder("INBOX") + + # Build search criteria + search_criteria = [] + if filter.get("unread", True): + search_criteria.append("UNSEEN") + + if filter.get("since_date"): + since_date = filter["since_date"] + search_criteria.append(["SINCE", since_date]) + + # Search for messages + if not search_criteria: + search_criteria = ["ALL"] + + message_ids = self.client.search(search_criteria) + return message_ids + + message_ids = await loop.run_in_executor(None, _sync_fetch) + + if not message_ids: + PrintStyle.hint("No messages found matching criteria") + return messages + + PrintStyle.standard(f"Found {len(message_ids)} messages") + + # Fetch and process messages + for msg_id in message_ids: + try: + msg = await self._fetch_and_parse_imap_message(msg_id, download_folder, filter) + if msg: + messages.append(msg) + except Exception as e: + PrintStyle.error(f"Error processing message {msg_id}: {format_error(e)}") + continue + + return messages + + async def _fetch_and_parse_imap_message( + self, + msg_id: int, + download_folder: str, + filter: Dict[str, Any], + ) -> Optional[Message]: + """Fetch and parse a single IMAP message with retry logic for large messages.""" + loop = asyncio.get_event_loop() + + def _sync_fetch(): + try: + # Try standard RFC822 fetch first + return self.client.fetch([msg_id], ["RFC822"])[msg_id] + except Exception as e: + error_msg = str(e).lower() + # If "line too long" error, try fetching in parts + if "line too long" in error_msg or "fetch_failed" in error_msg: + PrintStyle.warning(f"Message {msg_id} too large for standard fetch, trying alternative method") + # Fetch headers and body separately to avoid line length issues + try: + envelope = self.client.fetch([msg_id], ["BODY.PEEK[]"])[msg_id] + return envelope + except Exception as e2: + PrintStyle.error(f"Alternative fetch also failed for message {msg_id}: {format_error(e2)}") + raise + raise + + try: + raw_msg = await loop.run_in_executor(None, _sync_fetch) + + # Extract email data from response + if b"RFC822" in raw_msg: + email_data = raw_msg[b"RFC822"] + elif b"BODY[]" in raw_msg: + email_data = raw_msg[b"BODY[]"] + else: + PrintStyle.error(f"Unexpected response format for message {msg_id}") + return None + + email_msg = email.message_from_bytes(email_data) + + # Apply sender filter + sender = self._decode_header(email_msg.get("From", "")) + if filter.get("sender") and not 
fnmatch(sender, filter["sender"]): + return None + + # Apply subject filter + subject = self._decode_header(email_msg.get("Subject", "")) + if filter.get("subject") and not fnmatch(subject, filter["subject"]): + return None + + # Parse message + return await self._parse_message(email_msg, download_folder) + + except Exception as e: + PrintStyle.error(f"Failed to fetch/parse message {msg_id}: {format_error(e)}") + return None + + async def _fetch_exchange_messages( + self, + download_folder: str, + filter: Dict[str, Any], + ) -> List[Message]: + """Fetch messages from Exchange server.""" + if not self.exchange_account: + raise RepairableException("Exchange account not connected. Call connect() first.") + + from exchangelib import Q + + loop = asyncio.get_event_loop() + messages: List[Message] = [] + + def _sync_fetch(): + # Build query + query = None + if filter.get("unread", True): + query = Q(is_read=False) + + if filter.get("sender"): + sender_pattern = filter["sender"].replace("*", "") + sender_q = Q(sender__contains=sender_pattern) + query = query & sender_q if query else sender_q + + if filter.get("subject"): + subject_pattern = filter["subject"].replace("*", "") + subject_q = Q(subject__contains=subject_pattern) + query = query & subject_q if query else subject_q + + # Fetch messages from inbox + inbox = self.exchange_account.inbox + items = inbox.filter(query) if query else inbox.all() + return list(items) + + exchange_messages = await loop.run_in_executor(None, _sync_fetch) + + PrintStyle.standard(f"Found {len(exchange_messages)} Exchange messages") + + # Process messages + for ex_msg in exchange_messages: + try: + msg = await self._parse_exchange_message(ex_msg, download_folder) + if msg: + messages.append(msg) + except Exception as e: + PrintStyle.error(f"Error processing Exchange message: {format_error(e)}") + continue + + return messages + + async def _parse_exchange_message( + self, + ex_msg, + download_folder: str, + ) -> Message: + """Parse an Exchange message.""" + loop = asyncio.get_event_loop() + + def _get_body(): + return str(ex_msg.text_body or ex_msg.body or "") + + body = await loop.run_in_executor(None, _get_body) + + # Process HTML if present + if ex_msg.body and str(ex_msg.body).strip().startswith("<"): + body = self._html_to_text(str(ex_msg.body)) + + # Save attachments + attachment_paths = [] + if ex_msg.attachments: + for attachment in ex_msg.attachments: + if hasattr(attachment, "content"): + path = await self._save_attachment_bytes( + attachment.name, + attachment.content, + download_folder + ) + attachment_paths.append(path) + + return Message( + sender=str(ex_msg.sender.email_address) if ex_msg.sender else "", + subject=str(ex_msg.subject or ""), + body=body, + attachments=attachment_paths + ) + + async def _parse_message( + self, + email_msg: EmailMessage, + download_folder: str, + ) -> Message: + """ + Parse email message and extract content with inline attachments. + + Processes multipart messages, converts HTML to text, and maintains + positional context for inline attachments. 
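# Illustrative note (paths assumed): inline parts end up referenced inside the
# returned body text as markers such as
#   "... see the attached photo [file:///a0/tmp/email/inbox/photo_1a2b3c4d.png] ..."
# so callers can locate saved attachments at the position they appeared in the mail.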
+ """ + sender = self._decode_header(email_msg.get("From", "")) + subject = self._decode_header(email_msg.get("Subject", "")) + + # Extract body and attachments + body = "" + attachment_paths: List[str] = [] + cid_map: Dict[str, str] = {} # Map Content-ID to file paths + body_parts: List[str] = [] # Track parts in order + + if email_msg.is_multipart(): + # Process parts in order to maintain attachment positions + for part in email_msg.walk(): + content_type = part.get_content_type() + content_disposition = str(part.get("Content-Disposition", "")) + + # Skip multipart containers + if part.get_content_maintype() == "multipart": + continue + + # Handle attachments + if "attachment" in content_disposition or part.get("Content-ID"): + filename = part.get_filename() + if filename: + filename = self._decode_header(filename) + content = part.get_payload(decode=True) + if content: + path = await self._save_attachment_bytes( + filename, content, download_folder + ) + attachment_paths.append(path) + + # Map Content-ID for inline images + cid = part.get("Content-ID") + if cid: + cid = cid.strip("<>") + cid_map[cid] = path + + # Add positional marker for non-cid attachments + # (cid attachments are positioned via HTML references) + if not cid and body_parts: + body_parts.append(f"\n[file://{path}]\n") + + # Handle body text + elif content_type == "text/plain": + if not body: # Use first text/plain as primary body + charset = part.get_content_charset() or "utf-8" + body = part.get_payload(decode=True).decode(charset, errors="ignore") + body_parts.append(body) + + elif content_type == "text/html": + if not body: # Use first text/html as primary body if no text/plain + charset = part.get_content_charset() or "utf-8" + html_content = part.get_payload(decode=True).decode(charset, errors="ignore") + body = self._html_to_text(html_content, cid_map) + body_parts.append(body) + + # Combine body parts if we built them up + if len(body_parts) > 1: + body = "".join(body_parts) + else: + # Single part message + content_type = email_msg.get_content_type() + charset = email_msg.get_content_charset() or "utf-8" + content = email_msg.get_payload(decode=True) + if content: + if content_type == "text/html": + body = self._html_to_text(content.decode(charset, errors="ignore"), cid_map) + else: + body = content.decode(charset, errors="ignore") + + return Message( + sender=sender, + subject=subject, + body=body, + attachments=attachment_paths + ) + + def _html_to_text(self, html_content: str, cid_map: Optional[Dict[str, str]] = None) -> str: + """ + Convert HTML to plain text with inline attachment references. + + Replaces inline images with [file:///a0/...] markers to maintain + positional context. 
+ """ + cid_map = cid_map or {} + + # Replace cid: references with file paths before conversion + if cid_map: + soup = BeautifulSoup(html_content, "html.parser") + for img in soup.find_all("img"): + src = img.get("src", "") + if src.startswith("cid:"): + cid = src[4:] # Remove "cid:" prefix + if cid in cid_map: + # Replace with file path marker + file_marker = f"[file://{cid_map[cid]}]" + img.replace_with(soup.new_string(file_marker)) + html_content = str(soup) + + # Convert HTML to text + h = html2text.HTML2Text() + h.ignore_links = False + h.ignore_images = False + h.ignore_emphasis = False + h.body_width = 0 # Don't wrap lines + + text = h.handle(html_content) + + # Clean up extra whitespace + text = re.sub(r"\n{3,}", "\n\n", text) # Max 2 consecutive newlines + text = text.strip() + + return text + + async def _save_attachment_bytes( + self, + filename: str, + content: bytes, + download_folder: str, + ) -> str: + """ + Save attachment to disk and return absolute path. + + Uses Agent Zero's file helpers for path management. + """ + # Sanitize filename + filename = files.safe_file_name(filename) + + # Generate unique filename if needed + unique_id = uuid.uuid4().hex[:8] + name, ext = os.path.splitext(filename) + unique_filename = f"{name}_{unique_id}{ext}" + + # Build relative path and save + relative_path = os.path.join(download_folder, unique_filename) + files.write_file_bin(relative_path, content) + + # Return absolute path + abs_path = files.get_abs_path(relative_path) + return abs_path + + def _decode_header(self, header: str) -> str: + """Decode email header handling various encodings.""" + if not header: + return "" + + decoded_parts = [] + for part, encoding in decode_header(header): + if isinstance(part, bytes): + decoded_parts.append(part.decode(encoding or "utf-8", errors="ignore")) + else: + decoded_parts.append(str(part)) + + return " ".join(decoded_parts) + + +async def read_messages( + account_type: str = "imap", + server: str = "", + port: int = 993, + username: str = "", + password: str = "", + download_folder: str = "tmp/email", + options: Optional[Dict[str, Any]] = None, + filter: Optional[Dict[str, Any]] = None, +) -> List[Message]: + """ + Convenience wrapper for reading email messages. + + Automatically handles connection and disconnection. 
+ + Args: + account_type: "imap" or "exchange" + server: Mail server address + port: Server port (default 993 for IMAP SSL) + username: Email username + password: Email password + download_folder: Folder to save attachments (relative to /a0/) + options: Optional configuration dict + filter: Filter criteria dict + + Returns: + List of Message objects + + Example: + from python.helpers.email_client import read_messages + messages = await read_messages( + server="imap.gmail.com", + port=993, + username=secrets.get("EMAIL_USER"), + password=secrets.get("EMAIL_PASSWORD"), + download_folder="tmp/email/inbox", + filter={"unread": True, "sender": "*@company.com"} + ) + """ + client = EmailClient( + account_type=account_type, + server=server, + port=port, + username=username, + password=password, + options=options, + ) + + try: + await client.connect() + messages = await client.read_messages(download_folder, filter) + return messages + finally: + await client.disconnect() diff --git a/python/helpers/extension.py b/python/helpers/extension.py index 5c12d48066..186099cc02 100644 --- a/python/helpers/extension.py +++ b/python/helpers/extension.py @@ -1,14 +1,22 @@ from abc import abstractmethod from typing import Any -from python.helpers import extract_tools, files +from python.helpers import extract_tools, files from typing import TYPE_CHECKING + if TYPE_CHECKING: from agent import Agent + +DEFAULT_EXTENSIONS_FOLDER = "python/extensions" +USER_EXTENSIONS_FOLDER = "usr/extensions" + +_cache: dict[str, list[type["Extension"]]] = {} + + class Extension: def __init__(self, agent: "Agent|None", **kwargs): - self.agent: "Agent" = agent # type: ignore < here we ignore the type check as there are currently no extensions without an agent + self.agent: "Agent" = agent # type: ignore < here we ignore the type check as there are currently no extensions without an agent self.kwargs = kwargs @abstractmethod @@ -16,25 +24,26 @@ async def execute(self, **kwargs) -> Any: pass -async def call_extensions(extension_point: str, agent: "Agent|None" = None, **kwargs) -> Any: - - # get default extensions - defaults = await _get_extensions("python/extensions/" + extension_point) - classes = defaults +async def call_extensions( + extension_point: str, agent: "Agent|None" = None, **kwargs +) -> Any: + from python.helpers import projects, subagents - # get agent extensions - if agent and agent.config.profile: - agentics = await _get_extensions("agents/" + agent.config.profile + "/extensions/" + extension_point) - if agentics: - # merge them, agentics overwrite defaults - unique = {} - for cls in defaults + agentics: - unique[_get_file_from_module(cls.__module__)] = cls + # search for extension folders in all agent's paths + paths = subagents.get_paths(agent, "extensions", extension_point, default_root="python") + all_exts = [cls for path in paths for cls in _get_extensions(path)] - # sort by name - classes = sorted(unique.values(), key=lambda cls: _get_file_from_module(cls.__module__)) + # merge: first ocurrence of file name is the override + unique = {} + for cls in all_exts: + file = _get_file_from_module(cls.__module__) + if file not in unique: + unique[file] = cls + classes = sorted( + unique.values(), key=lambda cls: _get_file_from_module(cls.__module__) + ) - # call extensions + # execute unique extensions for cls in classes: await cls(agent=agent).execute(**kwargs) @@ -42,8 +51,8 @@ async def call_extensions(extension_point: str, agent: "Agent|None" = None, **kw def _get_file_from_module(module_name: str) -> str: return 
module_name.split(".")[-1] -_cache: dict[str, list[type[Extension]]] = {} -async def _get_extensions(folder:str): + +def _get_extensions(folder: str): global _cache folder = files.get_abs_path(folder) if folder in _cache: @@ -51,10 +60,7 @@ async def _get_extensions(folder:str): else: if not files.exists(folder): return [] - classes = extract_tools.load_classes_from_folder( - folder, "*", Extension - ) + classes = extract_tools.load_classes_from_folder(folder, "*", Extension) _cache[folder] = classes return classes - diff --git a/python/helpers/fasta2a_server.py b/python/helpers/fasta2a_server.py index d058d1b683..000c5a39df 100644 --- a/python/helpers/fasta2a_server.py +++ b/python/helpers/fasta2a_server.py @@ -6,7 +6,7 @@ import contextlib import threading -from python.helpers import settings +from python.helpers import settings, projects from starlette.requests import Request # Local imports @@ -84,6 +84,14 @@ async def run_task(self, params: Any) -> None: # params: TaskSendParams cfg = initialize_agent() context = AgentContext(cfg, type=AgentContextType.BACKGROUND) + # Retrieve project from message.metadata (standard A2A pattern) + metadata = message.get('metadata', {}) or {} + project_name = metadata.get('project') + + # Activate project if specified + if project_name: + projects.activate_project(context.id, project_name) + # Log user message so it appears instantly in UI chat window context.log.log( type="user", # type: ignore[arg-type] @@ -424,6 +432,9 @@ async def __call__(self, scope, receive, send): if path.startswith('/a2a'): path = path[4:] # Remove '/a2a' prefix + # Initialize project name + project_name = None + # Check if path matches token pattern /t-{token}/ if path.startswith('/t-'): # Extract token from path @@ -431,6 +442,14 @@ async def __call__(self, scope, receive, send): path_parts = path[3:].split('/', 1) # Remove '/t-' prefix request_token = path_parts[0] remaining_path = '/' + path_parts[1] if len(path_parts) > 1 else '/' + + # Check for project pattern /p-{project}/ + if remaining_path.startswith('/p-'): + project_parts = remaining_path[3:].split('/', 1) + if project_parts[0]: + project_name = project_parts[0] + remaining_path = '/' + project_parts[1] if len(project_parts) > 1 else '/' + _PRINTER.print(f"[A2A] Extracted project from URL: {project_name}") else: request_token = path[3:] remaining_path = '/' @@ -452,6 +471,54 @@ async def __call__(self, scope, receive, send): }) return + # If project specified, inject it into the request payload + if project_name: + # Buffer messages and modify before returning the complete body + received_messages = [] + body_modified = False + original_receive = receive + + async def receive_wrapper(): + nonlocal body_modified + + # Receive and buffer the next message + message = await original_receive() + received_messages.append(message) + + # When we get the complete body, inject project into JSON + if message['type'] == 'http.request' and not message.get('more_body', False) and not body_modified: + body_modified = True + try: + import json + # Reconstruct full body from all buffered messages + body_parts = [msg.get('body', b'') for msg in received_messages if msg['type'] == 'http.request'] + full_body = b''.join(body_parts) + data = json.loads(full_body) + + # INJECT project into message.metadata (standard A2A pattern) + if 'params' in data and 'message' in data['params']: + msg_data = data['params']['message'] + # Initialize metadata if it doesn't exist + if 'metadata' not in msg_data or msg_data['metadata'] is None: + 
msg_data['metadata'] = {} + msg_data['metadata']['project'] = project_name + + # Serialize back to JSON + modified_body = json.dumps(data).encode('utf-8') + + # Return modified message IMMEDIATELY (before FastA2A processes it) + return { + 'type': 'http.request', + 'body': modified_body, + 'more_body': False + } + except Exception as e: + _PRINTER.print(f"[A2A] Failed to inject project into payload: {e}") + + return message + + receive = receive_wrapper + # Update scope with cleaned path scope = dict(scope) scope['path'] = remaining_path diff --git a/python/helpers/file_tree.py b/python/helpers/file_tree.py new file mode 100644 index 0000000000..c53dbcda56 --- /dev/null +++ b/python/helpers/file_tree.py @@ -0,0 +1,660 @@ +from __future__ import annotations + +from collections import deque +from dataclasses import dataclass +from datetime import datetime, timezone +import os +from typing import Any, Callable, Iterable, Literal, Optional, Sequence + +from pathspec import PathSpec + +from python.helpers.files import get_abs_path + +SORT_BY_NAME = "name" +SORT_BY_CREATED = "created" +SORT_BY_MODIFIED = "modified" + +SORT_ASC = "asc" +SORT_DESC = "desc" + +OUTPUT_MODE_STRING = "string" +OUTPUT_MODE_FLAT = "flat" +OUTPUT_MODE_NESTED = "nested" + + +def file_tree( + relative_path: str, + *, + max_depth: int = 0, + max_lines: int = 0, + folders_first: bool = True, + max_folders: int = 0, + max_files: int = 0, + sort: tuple[Literal["name", "created", "modified"], Literal["asc", "desc"]] = ("modified", "desc"), + ignore: str | None = None, + output_mode: Literal["string", "flat", "nested"] = OUTPUT_MODE_STRING, +) -> str | list[dict]: + """Render a directory tree relative to the repository base path. + + Parameters: + relative_path: Base directory (relative to project root) to scan with :func:`get_abs_path`. + max_depth: Maximum depth of traversal (0 = unlimited). Depth starts at 1 for root entries. + max_lines: Global limit for rendered lines (0 = unlimited). When exceeded, the current depth + finishes rendering before deeper levels are skipped. + folders_first: When True, folders render before files within each directory. + max_folders: Optional per-directory cap (0 = unlimited) on rendered folder entries before adding a + ``# N more folders`` comment. When only a single folder exceeds the limit and ``max_folders`` is greater than zero, that folder is rendered + directly instead of emitting a summary comment. + max_files: Optional per-directory cap (0 = unlimited) on rendered file entries before adding a ``# N more files`` comment. + As with folders, a single excess file is rendered when ``max_files`` is greater than zero. + sort: Tuple of ``(key, direction)`` where key is one of :data:`SORT_BY_NAME`, + :data:`SORT_BY_CREATED`, or :data:`SORT_BY_MODIFIED`; direction is :data:`SORT_ASC` + or :data:`SORT_DESC`. + ignore: Inline ``.gitignore`` content or ``file:`` reference. Examples:: + + ignore=\"\"\"\\n*.pyc\\n__pycache__/\\n!important.py\\n\"\"\" + ignore=\"file:.gitignore\" # relative to scan root + ignore=\"file://.gitignore\" # URI-style relative path + ignore=\"file:/abs/path/.gitignore\" + ignore=\"file:///abs/path/.gitignore\" + + output_mode: One of :data:`OUTPUT_MODE_STRING`, :data:`OUTPUT_MODE_FLAT`, or + :data:`OUTPUT_MODE_NESTED`. + + Returns: + ``OUTPUT_MODE_STRING`` β†’ ``str``: multi-line ASCII tree. + ``OUTPUT_MODE_FLAT`` β†’ ``list[dict]``: flattened sequence of TreeItem dictionaries. 
+ ``OUTPUT_MODE_NESTED`` β†’ ``list[dict]``: nested TreeItem dictionaries where folders + include ``items`` arrays. + + Notes: + * The utility is synchronous; avoid calling from latency-sensitive async loops. + * The ASCII renderer walks the established tree depth-first so connectors reflect parent/child structure, + while traversal and limit calculations remain breadth-first by depth. When ``max_lines`` is set, the number + of non-comment entries (excluding the root banner) never exceeds that limit; informational summary comments + are emitted in addition when necessary. + * ``created`` and ``modified`` values in structured outputs are timezone-aware UTC + :class:`datetime.datetime` objects:: + + item = flat_items[0] + iso = item[\"created\"].isoformat() + epoch = item[\"created\"].timestamp() + + """ + abs_root = get_abs_path(relative_path) + + if not os.path.exists(abs_root): + raise FileNotFoundError(f"Path does not exist: {relative_path!r}") + if not os.path.isdir(abs_root): + raise NotADirectoryError(f"Expected a directory, received: {relative_path!r}") + + sort_key, sort_direction = sort + if sort_key not in {SORT_BY_NAME, SORT_BY_CREATED, SORT_BY_MODIFIED}: + raise ValueError(f"Unsupported sort key: {sort_key!r}") + if sort_direction not in {SORT_ASC, SORT_DESC}: + raise ValueError(f"Unsupported sort direction: {sort_direction!r}") + if output_mode not in {OUTPUT_MODE_STRING, OUTPUT_MODE_FLAT, OUTPUT_MODE_NESTED}: + raise ValueError(f"Unsupported output mode: {output_mode!r}") + if max_depth < 0: + raise ValueError("max_depth must be >= 0") + if max_lines < 0: + raise ValueError("max_lines must be >= 0") + + ignore_spec = _resolve_ignore_patterns(ignore, abs_root) + + root_stat = os.stat(abs_root, follow_symlinks=False) + root_name = os.path.basename(os.path.normpath(abs_root)) or os.path.basename(abs_root) + root_node = _TreeEntry( + name=root_name, + level=0, + item_type="folder", + created=datetime.fromtimestamp(root_stat.st_ctime, tz=timezone.utc), + modified=datetime.fromtimestamp(root_stat.st_mtime, tz=timezone.utc), + parent=None, + items=[], + rel_path="", + ) + + queue: deque[tuple[_TreeEntry, str, int]] = deque([(root_node, abs_root, 1)]) + nodes_in_order: list[_TreeEntry] = [] + rendered_count = 0 + limit_reached = False + visibility_cache: dict[str, bool] = {} + + def make_entry(entry: os.DirEntry, parent: _TreeEntry, level: int, item_type: Literal["file", "folder"]) -> _TreeEntry: + stat = entry.stat(follow_symlinks=False) + rel_path = os.path.relpath(entry.path, abs_root) + rel_posix = _normalize_relative_path(rel_path) + return _TreeEntry( + name=entry.name, + level=level, + item_type=item_type, + created=datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc), + modified=datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc), + parent=parent, + items=[] if item_type == "folder" else None, + rel_path=rel_posix, + ) + + while queue and not limit_reached: + parent_node, current_dir, level = queue.popleft() + + if max_depth and level > max_depth: + continue + + remaining_depth = max_depth - level if max_depth else -1 + folders, files = _list_directory_children( + current_dir, + abs_root, + ignore_spec, + max_depth_remaining=remaining_depth, + cache=visibility_cache, + ) + + folder_entries = [make_entry(folder, parent_node, level, "folder") for folder in folders] + file_entries = [make_entry(file_entry, parent_node, level, "file") for file_entry in files] + + children = _apply_sorting_and_limits( + folder_entries, + file_entries, + folders_first=folders_first, + 
sort=sort, + max_folders=max_folders, + max_files=max_files, + directory_node=parent_node, + ) + + trimmed_children: list[_TreeEntry] = [] + hidden_children_local: list[_TreeEntry] = [] + if max_lines and rendered_count >= max_lines: + limit_reached = True + hidden_children_local = children + else: + for index, child in enumerate(children): + if max_lines and rendered_count >= max_lines: + limit_reached = True + hidden_children_local = children[index:] + break + trimmed_children.append(child) + nodes_in_order.append(child) + is_global_summary = ( + child.item_type == "comment" + and child.rel_path.endswith("#summary:limit") + ) + if not is_global_summary: + rendered_count += 1 + if limit_reached and hidden_children_local: + summary = _create_global_limit_comment( + parent_node, + hidden_children_local, + ) + trimmed_children.append(summary) + nodes_in_order.append(summary) + + parent_node.items = trimmed_children or None + + if limit_reached: + break + + for child in trimmed_children: + if child.item_type != "folder": + continue + if max_depth and level >= max_depth: + continue + child_abs = os.path.join(current_dir, child.name) + queue.append((child, child_abs, level + 1)) + + remaining_queue = list(queue) if limit_reached else [] + queue.clear() + + if limit_reached and remaining_queue: + for folder_node, folder_path, _ in remaining_queue: + summary = _create_folder_unprocessed_comment( + folder_node, + folder_path, + abs_root, + ignore_spec, + ) + if summary is None: + continue + folder_node.items = (folder_node.items or []) + [summary] + nodes_in_order.append(summary) + + visible_nodes = nodes_in_order + + visible_ids = {id(node) for node in visible_nodes} + if visible_ids: + _prune_to_visible(root_node, visible_ids) + + _mark_last_flags(root_node) + _refresh_render_metadata(root_node) + + def iter_visible() -> Iterable[_TreeEntry]: + for node in _iter_depth_first(root_node.items or []): + if not visible_ids or id(node) in visible_ids: + yield node + + if output_mode == OUTPUT_MODE_STRING: + display_name = relative_path.strip() or root_name + root_line = f"{display_name.rstrip(os.sep)}/" + lines = [root_line] + for node in iter_visible(): + lines.append(node.text) + return "\n".join(lines) + + if output_mode == OUTPUT_MODE_FLAT: + return _build_tree_items_flat(list(iter_visible())) + + return _to_nested_structure(root_node.items or []) + + +@dataclass(slots=True) +class _TreeEntry: + name: str + level: int + item_type: Literal["file", "folder", "comment"] + created: datetime + modified: datetime + parent: Optional["_TreeEntry"] = None + items: Optional[list["_TreeEntry"]] = None + is_last: bool = False + rel_path: str = "" + text: str = "" + + def as_dict(self) -> dict[str, Any]: + return { + "name": self.name, + "level": self.level, + "type": self.item_type, + "created": self.created, + "modified": self.modified, + "text": self.text, + "items": [child.as_dict() for child in self.items] if self.items is not None else None, + } + + +def _normalize_relative_path(path: str) -> str: + normalized = path.replace(os.sep, "/") + if normalized in {".", ""}: + return "" + while normalized.startswith("./"): + normalized = normalized[2:] + return normalized + + +def _directory_has_visible_entries( + directory: str, + root_abs_path: str, + ignore_spec: PathSpec, + cache: dict[str, bool], + max_depth_remaining: int, +) -> bool: + if max_depth_remaining == 0: + return False + + cached = cache.get(directory) + if cached is not None: + return cached + + try: + with os.scandir(directory) as iterator: + 
for entry in iterator: + rel_path = os.path.relpath(entry.path, root_abs_path) + rel_posix = _normalize_relative_path(rel_path) + is_dir = entry.is_dir(follow_symlinks=False) + + if is_dir: + ignored = ignore_spec.match_file(rel_posix) or ignore_spec.match_file(f"{rel_posix}/") + if ignored: + next_depth = max_depth_remaining - 1 if max_depth_remaining > 0 else -1 + if next_depth == 0: + continue + if _directory_has_visible_entries( + entry.path, + root_abs_path, + ignore_spec, + cache, + next_depth, + ): + cache[directory] = True + return True + continue + else: + if ignore_spec.match_file(rel_posix): + continue + + cache[directory] = True + return True + except FileNotFoundError: + cache[directory] = False + return False + + cache[directory] = False + return False + + +def _create_summary_comment(parent: _TreeEntry, noun: str, count: int) -> _TreeEntry: + label = noun + if count == 1 and noun.endswith("s"): + label = noun[:-1] + elif count > 1 and not noun.endswith("s"): + label = f"{noun}s" + return _TreeEntry( + name=f"{count} more {label}", + level=parent.level + 1, + item_type="comment", + created=parent.created, + modified=parent.modified, + parent=parent, + items=None, + rel_path=f"{parent.rel_path}#summary:{noun}:{count}", + ) + + +def _create_global_limit_comment(parent: _TreeEntry, hidden_children: Sequence[_TreeEntry]) -> _TreeEntry: + folders = sum(1 for child in hidden_children if child.item_type == "folder") + files = sum(1 for child in hidden_children if child.item_type == "file") + parts: list[str] = [] + if folders: + label = "folder" if folders == 1 else "folders" + parts.append(f"{folders} {label}") + if files: + label = "file" if files == 1 else "files" + parts.append(f"{files} {label}") + if not parts: + remaining = len(hidden_children) + label = "item" if remaining == 1 else "items" + parts.append(f"{remaining} {label}") + label_text = ", ".join(parts) + return _TreeEntry( + name=f"limit reached – hidden: {label_text}", + level=parent.level + 1, + item_type="comment", + created=parent.created, + modified=parent.modified, + parent=parent, + items=None, + rel_path=f"{parent.rel_path}#summary:limit", + ) + + +def _create_folder_unprocessed_comment( + folder_node: _TreeEntry, + folder_path: str, + abs_root: str, + ignore_spec: Optional[PathSpec], +) -> Optional[_TreeEntry]: + try: + folders, files = _list_directory_children( + folder_path, + abs_root, + ignore_spec, + max_depth_remaining=-1, + cache={}, + ) + except FileNotFoundError: + return None + + hidden_entries: list[_TreeEntry] = [] + for entry in folders: + stat = entry.stat(follow_symlinks=False) + hidden_entries.append( + _TreeEntry( + name=entry.name, + level=folder_node.level + 1, + item_type="folder", + created=datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc), + modified=datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc), + parent=folder_node, + items=None, + rel_path=os.path.join(folder_node.rel_path, entry.name), + ) + ) + for entry in files: + stat = entry.stat(follow_symlinks=False) + hidden_entries.append( + _TreeEntry( + name=entry.name, + level=folder_node.level + 1, + item_type="file", + created=datetime.fromtimestamp(stat.st_ctime, tz=timezone.utc), + modified=datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc), + parent=folder_node, + items=None, + rel_path=os.path.join(folder_node.rel_path, entry.name), + ) + ) + + if not hidden_entries: + return None + + return _create_global_limit_comment(folder_node, hidden_entries) + + +def _prune_to_visible(node: _TreeEntry, visible_ids: 
set[int]) -> None: + if node.items is None: + return + filtered: list[_TreeEntry] = [] + for child in node.items: + if not visible_ids or id(child) in visible_ids: + _prune_to_visible(child, visible_ids) + filtered.append(child) + node.items = filtered or None + + +def _mark_last_flags(node: _TreeEntry) -> None: + if node.items is None: + return + total = len(node.items) + for index, child in enumerate(node.items): + child.is_last = index == total - 1 + _mark_last_flags(child) + + +def _refresh_render_metadata(node: _TreeEntry) -> None: + if node.items is None: + return + for child in node.items: + child.text = _format_line(child) + _refresh_render_metadata(child) + + +def _resolve_ignore_patterns(ignore: str | None, root_abs_path: str) -> Optional[PathSpec]: + if ignore is None: + return None + + content: str + if ignore.startswith("file:"): + reference = ignore[5:] + if reference.startswith("///"): + reference_path = reference[2:] + elif reference.startswith("//"): + reference_path = os.path.join(root_abs_path, reference[2:]) + elif reference.startswith("/"): + reference_path = reference + else: + reference_path = os.path.join(root_abs_path, reference) + + try: + with open(reference_path, "r", encoding="utf-8") as handle: + content = handle.read() + except FileNotFoundError as exc: + raise FileNotFoundError(f"Ignore file not found: {reference_path}") from exc + else: + content = ignore + + lines = [ + line.strip() + for line in content.splitlines() + if line.strip() and not line.strip().startswith("#") + ] + + if not lines: + return None + + return PathSpec.from_lines("gitwildmatch", lines) + + +def _list_directory_children( + directory: str, + root_abs_path: str, + ignore_spec: Optional[PathSpec], + *, + max_depth_remaining: int, + cache: dict[str, bool], +) -> tuple[list[os.DirEntry], list[os.DirEntry]]: + folders: list[os.DirEntry] = [] + files: list[os.DirEntry] = [] + + try: + with os.scandir(directory) as iterator: + for entry in iterator: + if entry.name in (".", ".."): + continue + rel_path = os.path.relpath(entry.path, root_abs_path) + rel_posix = _normalize_relative_path(rel_path) + is_directory = entry.is_dir(follow_symlinks=False) + + if ignore_spec: + if is_directory: + ignored = ignore_spec.match_file(rel_posix) or ignore_spec.match_file(f"{rel_posix}/") + if ignored: + if _directory_has_visible_entries( + entry.path, + root_abs_path, + ignore_spec, + cache, + max_depth_remaining - 1, + ): + folders.append(entry) + continue + else: + if ignore_spec.match_file(rel_posix): + continue + + if is_directory: + folders.append(entry) + else: + files.append(entry) + except FileNotFoundError: + return ([], []) + + return (folders, files) + + +def _apply_sorting_and_limits( + folders: list[_TreeEntry], + files: list[_TreeEntry], + *, + folders_first: bool, + sort: tuple[str, str], + max_folders: int | None, + max_files: int | None, + directory_node: _TreeEntry, +) -> list[_TreeEntry]: + sort_key, sort_dir = sort + reverse = sort_dir == SORT_DESC + + def key_fn(node: _TreeEntry): + if sort_key == SORT_BY_NAME: + return node.name.casefold() + if sort_key == SORT_BY_CREATED: + return node.created + return node.modified + + folders_sorted = sorted(folders, key=key_fn, reverse=reverse) + files_sorted = sorted(files, key=key_fn, reverse=reverse) + combined: list[_TreeEntry] = [] + + def append_group(group: list[_TreeEntry], limit: int | None, noun: str) -> None: + if limit == 0: + limit = None + if not group: + return + if limit is None: + combined.extend(group) + return + + limit = 
max(limit, 0) + visible = group[:limit] + combined.extend(visible) + + overflow = group[limit:] + if not overflow: + return + + combined.append( + _create_summary_comment( + directory_node, + noun, + len(overflow), + ) + ) + + if folders_first: + append_group(folders_sorted, max_folders, "folder") + append_group(files_sorted, max_files, "file") + else: + append_group(files_sorted, max_files, "file") + append_group(folders_sorted, max_folders, "folder") + + return combined + + +def _format_line(node: _TreeEntry) -> str: + segments: list[str] = [] + ancestor = node.parent + while ancestor and ancestor.parent is not None: + segments.append(" " if ancestor.is_last else "β”‚ ") + ancestor = ancestor.parent + segments.reverse() + + connector = "└── " if node.is_last else "β”œβ”€β”€ " + if node.item_type == "folder": + label = f"{node.name}/" + elif node.item_type == "comment": + label = f"# {node.name}" + else: + label = node.name + + return "".join(segments) + connector + label + + +def _build_tree_items_flat(items: Sequence[_TreeEntry]) -> list[dict]: + return [ + { + "name": node.name, + "level": node.level, + "type": node.item_type, + "created": node.created, + "modified": node.modified, + "text": node.text, + "items": None, + } + for node in items + ] + + +def _to_nested_structure(items: Sequence[_TreeEntry]) -> list[dict]: + def convert(node: _TreeEntry) -> dict: + children = None + if node.items is not None: + children = [convert(child) for child in node.items] + return { + "name": node.name, + "level": node.level, + "type": node.item_type, + "created": node.created, + "modified": node.modified, + "text": node.text, + "items": children, + } + + return [convert(item) for item in items] + + +def _iter_depth_first(items: Sequence[_TreeEntry]) -> Iterable[_TreeEntry]: + for node in items: + yield node + if node.items: + yield from _iter_depth_first(node.items) diff --git a/python/helpers/files.py b/python/helpers/files.py index c90c701a20..0ed9cb06d6 100644 --- a/python/helpers/files.py +++ b/python/helpers/files.py @@ -14,15 +14,19 @@ import importlib.util import inspect import glob +import mimetypes +from simpleeval import simple_eval class VariablesPlugin(ABC): @abstractmethod - def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]: # type: ignore + def get_variables(self, file: str, backup_dirs: list[str] | None = None, **kwargs) -> dict[str, Any]: # type: ignore pass -def load_plugin_variables(file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]: +def load_plugin_variables( + file: str, backup_dirs: list[str] | None = None, **kwargs +) -> dict[str, Any]: if not file.endswith(".md"): return {} @@ -38,11 +42,14 @@ def load_plugin_variables(file: str, backup_dirs: list[str] | None = None) -> di plugin_file = None if plugin_file and exists(plugin_file): - + from python.helpers import extract_tools - classes = extract_tools.load_classes_from_file(plugin_file, VariablesPlugin, one_per_file=False) + + classes = extract_tools.load_classes_from_file( + plugin_file, VariablesPlugin, one_per_file=False + ) for cls in classes: - return cls().get_variables(file, backup_dirs) # type: ignore < abstract class here is ok, it is always a subclass + return cls().get_variables(file, backup_dirs, **kwargs) # type: ignore < abstract class here is ok, it is always a subclass # load python code and extract variables variables from it # module = None @@ -70,10 +77,13 @@ def load_plugin_variables(file: str, backup_dirs: list[str] | None = None) -> di # return 
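Since `load_plugin_variables` now forwards the caller's `**kwargs` into `VariablesPlugin.get_variables`, a prompt-file plugin can react to runtime values passed through `parse_file`/`read_prompt_file`. A minimal sketch of such a plugin; the class name and the `mode` kwarg are invented for illustration:

```python
from typing import Any

from python.helpers.files import VariablesPlugin


class ExampleVariables(VariablesPlugin):
    # Hypothetical plugin sitting next to a prompt .md file; kwargs now carry
    # whatever the caller passed to parse_file() / read_prompt_file().
    def get_variables(
        self, file: str, backup_dirs: list[str] | None = None, **kwargs
    ) -> dict[str, Any]:
        mode = kwargs.get("mode", "default")  # illustrative runtime switch
        return {"greeting": "Hello", "mode": mode}
```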
cls[1]().get_variables() # type: ignore return {} + from python.helpers.strings import sanitize_string -def parse_file(_filename: str, _directories: list[str] | None = None, _encoding="utf-8", **kwargs): +def parse_file( + _filename: str, _directories: list[str] | None = None, _encoding="utf-8", **kwargs +): if _directories is None: _directories = [] @@ -84,10 +94,10 @@ def parse_file(_filename: str, _directories: list[str] | None = None, _encoding= with open(absolute_path, "r", encoding=_encoding) as f: # content = remove_code_fences(f.read()) content = f.read() - + is_json = is_full_json_template(content) content = remove_code_fences(content) - variables = load_plugin_variables(absolute_path, _directories) or {} # type: ignore + variables = load_plugin_variables(absolute_path, _directories, **kwargs) or {} # type: ignore variables.update(kwargs) if is_json: content = replace_placeholders_json(content, **variables) @@ -99,12 +109,16 @@ def parse_file(_filename: str, _directories: list[str] | None = None, _encoding= # Process include statements content = process_includes( # here we use kwargs, the plugin variables are not inherited - content, _directories, **kwargs + content, + _directories, + **kwargs, ) return content -def read_prompt_file(_file: str, _directories: list[str] | None = None, _encoding="utf-8", **kwargs): +def read_prompt_file( + _file: str, _directories: list[str] | None = None, _encoding="utf-8", **kwargs +): if _directories is None: _directories = [] @@ -122,22 +136,74 @@ def read_prompt_file(_file: str, _directories: list[str] | None = None, _encodin # content = remove_code_fences(f.read()) content = f.read() - variables = load_plugin_variables(_file, _directories) or {} # type: ignore + variables = load_plugin_variables(_file, _directories, **kwargs) or {} # type: ignore variables.update(kwargs) + # evaluate conditions + content = evaluate_text_conditions(content, **variables) + # Replace placeholders with values from kwargs content = replace_placeholders_text(content, **variables) # Process include statements content = process_includes( # here we use kwargs, the plugin variables are not inherited - content, _directories, **kwargs + content, + _directories, + **kwargs, ) return content -def read_file(relative_path:str, encoding="utf-8"): +def evaluate_text_conditions(_content: str, **kwargs): + # search for {{if ...}} ... 
{{endif}} blocks and evaluate conditions with nesting support + if_pattern = re.compile(r"{{\s*if\s+(.*?)}}", flags=re.DOTALL) + token_pattern = re.compile(r"{{\s*(if\b.*?|endif)\s*}}", flags=re.DOTALL) + + def _process(text: str) -> str: + m_if = if_pattern.search(text) + if not m_if: + return text + + depth = 1 + pos = m_if.end() + while True: + m = token_pattern.search(text, pos) + if not m: + # Unterminated if-block, do not modify text + return text + token = m.group(1) + depth += 1 if token.startswith("if ") else -1 + if depth == 0: + break + pos = m.end() + + before = text[: m_if.start()] + condition = m_if.group(1).strip() + inner = text[m_if.end() : m.start()] + after = text[m.end() :] + + try: + result = simple_eval(condition, names=kwargs) + except Exception: + # On evaluation error, do not modify this block + return text + + if result: + # Keep inner content (processed recursively), remove if/endif markers + kept = before + _process(inner) + else: + # Skip entire block, including inner content and markers + kept = before + + # Continue processing the remaining text after this block + return kept + _process(after) + + return _process(_content) + + +def read_file(relative_path: str, encoding="utf-8"): # Try to get the absolute path for the file from the original directory or backup directories absolute_path = get_abs_path(relative_path) @@ -146,7 +212,7 @@ def read_file(relative_path:str, encoding="utf-8"): return f.read() -def read_file_bin(relative_path:str): +def read_file_bin(relative_path: str): # Try to get the absolute path for the file from the original directory or backup directories absolute_path = get_abs_path(relative_path) @@ -177,8 +243,9 @@ def replace_placeholders_json(_content: str, **kwargs): # Replace placeholders with values from kwargs for key, value in kwargs.items(): placeholder = "{{" + key + "}}" - strval = json.dumps(value) - _content = _content.replace(placeholder, strval) + if placeholder in _content: + strval = json.dumps(value) + _content = _content.replace(placeholder, strval) return _content @@ -248,6 +315,7 @@ def find_file_in_dirs(_filename: str, _directories: list[str]): f"File '{_filename}' not found in any of the provided directories." 
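`evaluate_text_conditions` evaluates `{{if ...}} ... {{endif}}` blocks with `simple_eval` against the supplied variables, keeping or dropping whole blocks (nesting included) while leaving ordinary `{{placeholder}}` markers for the later replacement step. A small usage sketch with illustrative values:

```python
from python.helpers.files import evaluate_text_conditions

template = (
    "Header\n"
    "{{if project_name}}"
    "Project: {{project_name}}\n"
    "{{if show_details}}Details enabled\n{{endif}}"
    "{{endif}}"
    "Footer"
)

# The outer block is kept (project_name is truthy), the nested block is
# dropped (show_details is False); plain placeholders stay untouched here.
print(evaluate_text_conditions(template, project_name="Acme", show_details=False))
# -> Header
#    Project: {{project_name}}
#    Footer
```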
) + def get_unique_filenames_in_dirs(dir_paths: list[str], pattern: str = "*"): # returns absolute paths for unique filenames, priority by order in dir_paths seen = set() @@ -263,6 +331,7 @@ def get_unique_filenames_in_dirs(dir_paths: list[str], pattern: str = "*"): result.sort(key=lambda path: os.path.basename(path)) return result + def remove_code_fences(text): # Pattern to match code fences with optional language specifier pattern = r"(```|~~~)(.*?\n)(.*?)(\1)" @@ -335,6 +404,45 @@ def delete_dir(relative_path: str): pass +def move_dir(old_path: str, new_path: str): + # rename/move the directory from old_path to new_path (both relative) + abs_old = get_abs_path(old_path) + abs_new = get_abs_path(new_path) + if not os.path.isdir(abs_old): + return # nothing to rename + try: + os.rename(abs_old, abs_new) + except Exception: + pass # suppress all errors, keep behavior consistent + + +# move dir safely, remove with number if needed +def move_dir_safe(src, dst, rename_format="{name}_{number}"): + base_dst = dst + i = 2 + while exists(dst): + dst = rename_format.format(name=base_dst, number=i) + i += 1 + move_dir(src, dst) + return dst + + +# create dir safely, add number if needed +def create_dir_safe(dst, rename_format="{name}_{number}"): + base_dst = dst + i = 2 + while exists(dst): + dst = rename_format.format(name=base_dst, number=i) + i += 1 + create_dir(dst) + return dst + + +def create_dir(relative_path: str): + abs_path = get_abs_path(relative_path) + os.makedirs(abs_path, exist_ok=True) + + def list_files(relative_path: str, filter: str = "*"): abs_path = get_abs_path(relative_path) if not os.path.exists(abs_path): @@ -351,18 +459,30 @@ def get_abs_path(*relative_paths): "Convert relative paths to absolute paths based on the base directory." return os.path.join(get_base_dir(), *relative_paths) -def deabsolute_path(path:str): + +def deabsolute_path(path: str): "Convert absolute paths to relative paths based on the base directory." return os.path.relpath(path, get_base_dir()) -def fix_dev_path(path:str): + +def fix_dev_path(path: str): "On dev environment, convert /a0/... paths to local absolute paths" from python.helpers.runtime import is_development + if is_development(): if path.startswith("/a0/"): path = path.replace("/a0/", "") return get_abs_path(path) + +def normalize_a0_path(path: str): + "Convert absolute paths into /a0/... 
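`move_dir_safe` and `create_dir_safe` avoid clobbering an existing directory by probing the destination and appending a counter through `rename_format` (starting at 2). A quick sketch with illustrative relative paths:

```python
from python.helpers import files

first = files.create_dir_safe("tmp/reports")    # creates tmp/reports
second = files.create_dir_safe("tmp/reports")   # tmp/reports exists -> creates tmp/reports_2
third = files.create_dir_safe("tmp/reports")    # -> creates tmp/reports_3

# move_dir_safe applies the same numbering to the destination before moving.
moved_to = files.move_dir_safe("tmp/old_reports", "tmp/reports")  # -> "tmp/reports_4" here
print(first, second, third, moved_to)
```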
paths" + if is_in_base_dir(path): + deabs = deabsolute_path(path) + return "/a0/" + deabs + return path + + def exists(*relative_paths): path = get_abs_path(*relative_paths) return os.path.exists(path) @@ -436,4 +556,45 @@ def move_file(relative_path: str, new_path: str): def safe_file_name(filename: str) -> str: # Replace any character that's not alphanumeric, dash, underscore, or dot with underscore - return re.sub(r'[^a-zA-Z0-9-._]', '_', filename) + return re.sub(r"[^a-zA-Z0-9-._]", "_", filename) + + +def read_text_files_in_dir( + dir_path: str, max_size: int = 1024 * 1024, pattern: str = "*" +) -> dict[str, str]: + + abs_path = get_abs_path(dir_path) + if not os.path.exists(abs_path): + return {} + result = {} + for file_path in [os.path.join(abs_path, f) for f in os.listdir(abs_path)]: + try: + if not os.path.isfile(file_path): + continue + if not fnmatch(os.path.basename(file_path), pattern): + continue + if max_size > 0 and os.path.getsize(file_path) > max_size: + continue + mime, _ = mimetypes.guess_type(file_path) + if mime is not None and not mime.startswith("text"): + continue + # Check if file is binary by reading a small chunk + content = read_file(file_path) + result[os.path.basename(file_path)] = content + except Exception: + continue + return result + +def list_files_in_dir_recursively(relative_path: str) -> list[str]: + abs_path = get_abs_path(relative_path) + if not os.path.exists(abs_path): + return [] + result = [] + for root, dirs, files in os.walk(abs_path): + for file in files: + file_path = os.path.join(root, file) + # Return relative path from the base directory + rel_path = os.path.relpath(file_path, abs_path) + result.append(rel_path) + return result + \ No newline at end of file diff --git a/python/helpers/git.py b/python/helpers/git.py index 0e112f4a71..33e3bec224 100644 --- a/python/helpers/git.py +++ b/python/helpers/git.py @@ -47,4 +47,11 @@ def get_git_info(): "version": version } - return git_info \ No newline at end of file + return git_info + +def get_version(): + try: + git_info = get_git_info() + return str(git_info.get("short_tag", "")).strip() or "unknown" + except Exception: + return "unknown" \ No newline at end of file diff --git a/python/helpers/knowledge_import.py b/python/helpers/knowledge_import.py index 4457650505..a68fe9b825 100644 --- a/python/helpers/knowledge_import.py +++ b/python/helpers/knowledge_import.py @@ -36,6 +36,7 @@ def load_knowledge( index: Dict[str, KnowledgeImport], metadata: dict[str, Any] = {}, filename_pattern: str = "**/*", + recursive: bool = True, ) -> Dict[str, KnowledgeImport]: """ Load knowledge files from a directory with change detection and metadata enhancement. 
@@ -96,7 +97,7 @@ def load_knowledge( # Fetch all files in the directory with specified extensions try: - kn_files = glob.glob(os.path.join(knowledge_dir, filename_pattern), recursive=True) + kn_files = glob.glob(os.path.join(knowledge_dir, filename_pattern), recursive=recursive) kn_files = [f for f in kn_files if os.path.isfile(f) and not os.path.basename(f).startswith('.')] except Exception as e: PrintStyle(font_color="red").print(f"Error scanning knowledge directory {knowledge_dir}: {e}") diff --git a/python/helpers/log.py b/python/helpers/log.py index 098862a954..d231810d25 100644 --- a/python/helpers/log.py +++ b/python/helpers/log.py @@ -1,6 +1,7 @@ from dataclasses import dataclass, field import json -from typing import Any, Literal, Optional, Dict, TypeVar +import time +from typing import Any, Literal, Optional, Dict, TypeVar, TYPE_CHECKING T = TypeVar("T") import uuid @@ -8,6 +9,11 @@ from python.helpers.strings import truncate_text_by_ratio import copy from typing import TypeVar +from python.helpers.secrets import get_secrets_manager + + +if TYPE_CHECKING: + from agent import AgentContext T = TypeVar("T") @@ -31,9 +37,10 @@ HEADING_MAX_LEN: int = 120 -CONTENT_MAX_LEN: int = 10000 +CONTENT_MAX_LEN: int = 15_000 +RESPONSE_CONTENT_MAX_LEN: int = 250_000 KEY_MAX_LEN: int = 60 -VALUE_MAX_LEN: int = 3000 +VALUE_MAX_LEN: int = 5000 PROGRESS_MAX_LEN: int = 120 @@ -88,18 +95,21 @@ def _truncate_value(val: T) -> T: return truncated -def _truncate_content(text: str | None) -> str: +def _truncate_content(text: str | None, type: Type) -> str: + + max_len = CONTENT_MAX_LEN if type != "response" else RESPONSE_CONTENT_MAX_LEN + if text is None: return "" raw = str(text) - if len(raw) <= CONTENT_MAX_LEN: + if len(raw) <= max_len: return raw # Same dynamic replacement logic as value truncation - removed = len(raw) - CONTENT_MAX_LEN + removed = len(raw) - max_len while True: replacement = f"\n\n<< {removed} Characters hidden >>\n\n" - truncated = truncate_text_by_ratio(raw, CONTENT_MAX_LEN, replacement, ratio=0.3) + truncated = truncate_text_by_ratio(raw, max_len, replacement, ratio=0.3) new_removed = len(raw) - (len(truncated) - len(replacement)) if new_removed == removed: break @@ -107,31 +117,14 @@ def _truncate_content(text: str | None) -> str: return truncated -def _mask_recursive(obj: T) -> T: - """Recursively mask secrets in nested objects.""" - try: - from python.helpers.secrets import SecretsManager - - secrets_mgr = SecretsManager.get_instance() - if isinstance(obj, str): - return secrets_mgr.mask_values(obj) - elif isinstance(obj, dict): - return {k: _mask_recursive(v) for k, v in obj.items()} # type: ignore - elif isinstance(obj, list): - return [_mask_recursive(item) for item in obj] # type: ignore - else: - return obj - except Exception as _e: - # If masking fails, return original object - return obj @dataclass class LogItem: log: "Log" no: int - type: str + type: Type heading: str = "" content: str = "" temp: bool = False @@ -139,9 +132,13 @@ class LogItem: kvps: Optional[OrderedDict] = None # Use OrderedDict for kvps id: Optional[str] = None # Add id field guid: str = "" + timestamp: float = 0.0 + duration_ms: Optional[int] = None + agent_number: int = 0 def __post_init__(self): self.guid = self.log.guid + self.timestamp = self.timestamp or time.time() def update( self, @@ -189,12 +186,16 @@ def output(self): "content": self.content, "temp": self.temp, "kvps": self.kvps, + "timestamp": self.timestamp, + "duration_ms": self.duration_ms, + "agent_number": self.agent_number, } class 
Log: def __init__(self): + self.context: "AgentContext|None" = None # set from outside self.guid: str = str(uuid.uuid4()) self.updates: list[int] = [] self.logs: list[LogItem] = [] @@ -208,16 +209,27 @@ def log( kvps: dict | None = None, temp: bool | None = None, update_progress: ProgressUpdate | None = None, - id: Optional[str] = None, # Add id parameter + id: Optional[str] = None, **kwargs, ) -> LogItem: # add a minimal item to the log + # Determine agent number from streaming agent + agent_number = 0 + if self.context and self.context.streaming_agent: + agent_number = self.context.streaming_agent.number + item = LogItem( log=self, no=len(self.logs), type=type, + agent_number=agent_number, ) + # Set duration on previous item and mark it as updated + if self.logs: + prev = self.logs[-1] + prev.duration_ms = int((item.timestamp - prev.timestamp) * 1000) + self.updates += [prev.no] self.logs.append(item) # and update it (to have just one implementation) @@ -237,55 +249,56 @@ def log( def _update_item( self, no: int, - type: str | None = None, + type: Type | None = None, heading: str | None = None, content: str | None = None, kvps: dict | None = None, temp: bool | None = None, update_progress: ProgressUpdate | None = None, - id: Optional[str] = None, # Add id parameter + id: Optional[str] = None, **kwargs, ): item = self.logs[no] + if id is not None: + item.id = id + + if type is not None: + item.type = type + + if temp is not None: + item.temp = temp + + if update_progress is not None: + item.update_progress = update_progress + + # adjust all content before processing if heading is not None: - heading = _mask_recursive(heading) + heading = self._mask_recursive(heading) heading = _truncate_heading(heading) item.heading = heading if content is not None: - content = _mask_recursive(content) - content = _truncate_content(content) + content = self._mask_recursive(content) + content = _truncate_content(content, item.type) item.content = content if kvps is not None: kvps = OrderedDict(copy.deepcopy(kvps)) - kvps = _mask_recursive(kvps) + kvps = self._mask_recursive(kvps) kvps = _truncate_value(kvps) item.kvps = kvps elif item.kvps is None: item.kvps = OrderedDict() if kwargs: kwargs = copy.deepcopy(kwargs) - kwargs = _mask_recursive(kwargs) + kwargs = self._mask_recursive(kwargs) item.kvps.update(kwargs) - if type is not None: - item.type = type - - if update_progress is not None: - item.update_progress = update_progress - - if temp is not None: - item.temp = temp - - if id is not None: - item.id = id - self.updates += [item.no] self._update_progress_from_item(item) def set_progress(self, progress: str, no: int = 0, active: bool = True): - progress = _mask_recursive(progress) + progress = self._mask_recursive(progress) progress = _truncate_progress(progress) self.progress = progress if not no: @@ -324,3 +337,28 @@ def _update_progress_from_item(self, item: LogItem): item.heading, (item.no if item.update_progress == "persistent" else -1), ) + + def _mask_recursive(self, obj: T) -> T: + """Recursively mask secrets in nested objects.""" + try: + from agent import AgentContext + secrets_mgr = get_secrets_manager(self.context or AgentContext.current()) + + # debug helper to identify context mismatch + # self_id = self.context.id if self.context else None + # current_ctx = AgentContext.current() + # current_id = current_ctx.id if current_ctx else None + # if self_id != current_id: + # print(f"Context ID mismatch: {self_id} != {current_id}") + + if isinstance(obj, str): + return 
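Each call to `Log.log()` now stamps the previous item with `duration_ms` (the gap to the new item's `timestamp`) and records which agent was streaming. A stripped-down sketch of just the timing bookkeeping, not the real `LogItem`:

```python
import time


class _Item:
    def __init__(self) -> None:
        self.timestamp = time.time()
        self.duration_ms: int | None = None


items: list[_Item] = []


def add_item() -> _Item:
    item = _Item()
    if items:
        prev = items[-1]
        # same arithmetic as Log.log(): elapsed time until the next item, in ms
        prev.duration_ms = int((item.timestamp - prev.timestamp) * 1000)
    items.append(item)
    return item


add_item()
time.sleep(0.1)
add_item()
print(items[0].duration_ms)  # roughly 100
```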
secrets_mgr.mask_values(obj) + elif isinstance(obj, dict): + return {k: self._mask_recursive(v) for k, v in obj.items()} # type: ignore + elif isinstance(obj, list): + return [self._mask_recursive(item) for item in obj] # type: ignore + else: + return obj + except Exception as _e: + # If masking fails, return original object + return obj \ No newline at end of file diff --git a/python/helpers/login.py b/python/helpers/login.py new file mode 100644 index 0000000000..046f6131db --- /dev/null +++ b/python/helpers/login.py @@ -0,0 +1,15 @@ +from python.helpers import dotenv +import hashlib + + +def get_credentials_hash(): + user = dotenv.get_dotenv_value("AUTH_LOGIN") + password = dotenv.get_dotenv_value("AUTH_PASSWORD") + if not user: + return None + return hashlib.sha256(f"{user}:{password}".encode()).hexdigest() + + +def is_login_required(): + user = dotenv.get_dotenv_value("AUTH_LOGIN") + return bool(user) diff --git a/python/helpers/mcp_server.py b/python/helpers/mcp_server.py index 4c080da69c..3c0308ed9c 100644 --- a/python/helpers/mcp_server.py +++ b/python/helpers/mcp_server.py @@ -3,23 +3,26 @@ from urllib.parse import urlparse from openai import BaseModel from pydantic import Field -from fastmcp import FastMCP +from fastmcp import FastMCP # type: ignore +import contextvars from agent import AgentContext, AgentContextType, UserMessage from python.helpers.persist_chat import remove_chat from initialize import initialize_agent from python.helpers.print_style import PrintStyle -from python.helpers import settings +from python.helpers import settings, projects from starlette.middleware import Middleware from starlette.middleware.base import BaseHTTPMiddleware from starlette.exceptions import HTTPException as StarletteHTTPException from starlette.types import ASGIApp, Receive, Scope, Send -from fastmcp.server.http import create_sse_app +from fastmcp.server.http import create_sse_app # type: ignore from starlette.requests import Request import threading _PRINTER = PrintStyle(italic=True, font_color="green", padding=False) +# Context variable to store project name from URL (per-request) +_mcp_project_name: contextvars.ContextVar[str | None] = contextvars.ContextVar('mcp_project_name', default=None) mcp_server: FastMCP = FastMCP( name="Agent Zero integrated MCP Server", @@ -127,6 +130,9 @@ async def send_message( description="The response from the remote Agent Zero Instance", title="response" ), ]: + # Get project name from context variable (set in proxy __call__) + project_name = _mcp_project_name.get() + context: AgentContext | None = None if chat_id: context = AgentContext.get(chat_id) @@ -137,10 +143,26 @@ async def send_message( # whether we should save the chat or delete it afterwards # If we continue a conversation, it must be persistent persistent_chat = True + + # Validation: if project is in URL but context has different project + if project_name: + existing_project = context.get_data(projects.CONTEXT_DATA_KEY_PROJECT) + if existing_project and existing_project != project_name: + return ToolError( + error=f"Chat belongs to project '{existing_project}' but URL specifies '{project_name}'", + chat_id=chat_id + ) else: config = initialize_agent() context = AgentContext(config=config, type=AgentContextType.BACKGROUND) + # Activate project if specified in URL + if project_name: + try: + projects.activate_project(context.id, project_name) + except Exception as e: + return ToolError(error=f"Failed to activate project: {str(e)}", chat_id="") + if not message: return ToolError( error="Message 
is required", chat_id=context.id if persistent_chat else "" @@ -325,10 +347,10 @@ def reconfigure(self, token: str): def _create_custom_http_app(self, streamable_http_path, auth_server_provider, auth_settings, debug, routes): """Create a custom HTTP app that manages the session manager manually.""" - from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app - from mcp.server.streamable_http_manager import StreamableHTTPSessionManager + from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app # type: ignore + from mcp.server.streamable_http_manager import StreamableHTTPSessionManager # type: ignore from starlette.routing import Mount - from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware + from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware # type: ignore import anyio server_routes = [] @@ -408,12 +430,44 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: # Route based on path path = scope.get("path", "") - if f"/t-{self.token}/sse" in path or f"t-{self.token}/messages" in path: - # Route to SSE app - await sse_app(scope, receive, send) - elif f"/t-{self.token}/http" in path: - # Route to HTTP app - await http_app(scope, receive, send) + # Check for token in path (with or without project segment) + # Patterns: /t-{token}/sse, /t-{token}/p-{project}/sse, etc. + has_token = f"/t-{self.token}/" in path or f"t-{self.token}/" in path + + # Extract project from path BEFORE cleaning and set in context variable + project_name = None + if "/p-" in path: + try: + parts = path.split("/p-") + if len(parts) > 1: + project_part = parts[1].split("/")[0] + if project_part: + project_name = project_part + _PRINTER.print(f"[MCP] Proxy extracted project from URL: {project_name}") + except Exception as e: + _PRINTER.print(f"[MCP] Failed to extract project in proxy: {e}") + + # Store project in context variable (will be available in send_message) + _mcp_project_name.set(project_name) + + # Strip project segment from path if present (e.g., /p-project_name/) + # This is needed because the underlying MCP apps were configured without project paths + cleaned_path = path + if "/p-" in path: + # Remove /p-{project}/ segment: /t-TOKEN/p-PROJECT/sse -> /t-TOKEN/sse + import re + cleaned_path = re.sub(r'/p-[^/]+/', '/', path) + + # Update scope with cleaned path for the underlying app + modified_scope = dict(scope) + modified_scope['path'] = cleaned_path + + if has_token and ("/sse" in path or "/messages" in path): + # Route to SSE app with cleaned path + await sse_app(modified_scope, receive, send) + elif has_token and "/http" in path: + # Route to HTTP app with cleaned path + await http_app(modified_scope, receive, send) else: raise StarletteHTTPException( status_code=403, detail="MCP forbidden" @@ -421,7 +475,7 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: async def mcp_middleware(request: Request, call_next): - + """Middleware to check if MCP server is enabled.""" # check if MCP server is enabled cfg = settings.get_settings() if not cfg["mcp_server_enabled"]: diff --git a/python/helpers/memory.py b/python/helpers/memory.py index 9e7fb5eb47..8c8785c5af 100644 --- a/python/helpers/memory.py +++ b/python/helpers/memory.py @@ -28,7 +28,7 @@ from python.helpers import knowledge_import from python.helpers.log import Log, LogItem from enum import Enum -from agent import Agent +from agent import Agent, AgentContext import models import logging from simpleeval import 
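The proxy accepts project-scoped MCP URLs such as `/t-{token}/p-{project}/sse`: it pulls the project name out for `_mcp_project_name`, then strips the `/p-.../` segment so the underlying SSE/HTTP apps see the paths they were mounted on. A sketch of that path handling with an illustrative token and project name:

```python
import re

path = "/t-EXAMPLE_TOKEN/p-acme/sse"

# extract the project segment, mirroring the proxy's split logic
project_name = None
if "/p-" in path:
    project_part = path.split("/p-")[1].split("/")[0]
    if project_part:
        project_name = project_part

# remove the /p-{project}/ segment before routing
cleaned_path = re.sub(r"/p-[^/]+/", "/", path)

print(project_name)  # acme
print(cleaned_path)  # /t-EXAMPLE_TOKEN/sse
```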
simple_eval @@ -63,7 +63,7 @@ class Area(Enum): @staticmethod async def get(agent: Agent): - memory_subdir = agent.config.memory_subdir or "default" + memory_subdir = get_agent_memory_subdir(agent) if Memory.index.get(memory_subdir) is None: log_item = agent.context.log.log( type="util", @@ -77,10 +77,11 @@ async def get(agent: Agent): ) Memory.index[memory_subdir] = db wrap = Memory(db, memory_subdir=memory_subdir) - if agent.config.knowledge_subdirs: - await wrap.preload_knowledge( - log_item, agent.config.knowledge_subdirs, memory_subdir - ) + knowledge_subdirs = get_knowledge_subdirs_by_memory_subdir( + memory_subdir, agent.config.knowledge_subdirs or [] + ) + if knowledge_subdirs: + await wrap.preload_knowledge(log_item, knowledge_subdirs, memory_subdir) return wrap else: return Memory( @@ -106,16 +107,20 @@ async def get_by_subdir( in_memory=False, ) wrap = Memory(db, memory_subdir=memory_subdir) - if preload_knowledge and agent_config.knowledge_subdirs: - await wrap.preload_knowledge( - log_item, agent_config.knowledge_subdirs, memory_subdir + if preload_knowledge: + knowledge_subdirs = get_knowledge_subdirs_by_memory_subdir( + memory_subdir, agent_config.knowledge_subdirs or [] ) + if knowledge_subdirs: + await wrap.preload_knowledge( + log_item, knowledge_subdirs, memory_subdir + ) Memory.index[memory_subdir] = db return Memory(db=Memory.index[memory_subdir], memory_subdir=memory_subdir) @staticmethod async def reload(agent: Agent): - memory_subdir = agent.config.memory_subdir or "default" + memory_subdir = get_agent_memory_subdir(agent) if Memory.index.get(memory_subdir): del Memory.index[memory_subdir] return await Memory.get(agent) @@ -136,7 +141,7 @@ def initialize( em_dir = files.get_abs_path( "memory/embeddings" ) # just caching, no need to parameterize - db_dir = Memory._abs_db_dir(memory_subdir) + db_dir = abs_db_dir(memory_subdir) # make sure embeddings and database directories exist os.makedirs(db_dir, exist_ok=True) @@ -249,7 +254,7 @@ async def preload_knowledge( log_item.update(heading="Preloading knowledge...") # db abs path - db_dir = Memory._abs_db_dir(memory_subdir) + db_dir = abs_db_dir(memory_subdir) # Load the index file if it exists index_path = files.get_abs_path(db_dir, "knowledge_import.json") @@ -298,12 +303,24 @@ def _preload_knowledge_folders( ): # load knowledge folders, subfolders by area for kn_dir in kn_dirs: + # everything in the root of the knowledge goes to main + index = knowledge_import.load_knowledge( + log_item, + abs_knowledge_dir(kn_dir), + index, + {"area": Memory.Area.MAIN}, + filename_pattern="*", + recursive=False, + ) + # subdirectories go to their folders for area in Memory.Area: index = knowledge_import.load_knowledge( log_item, - files.get_abs_path("knowledge", kn_dir, area.value), + # files.get_abs_path("knowledge", kn_dir, area.value), + abs_knowledge_dir(kn_dir, area.value), index, {"area": area.value}, + recursive=True, ) # load instruments descriptions @@ -313,6 +330,7 @@ def _preload_knowledge_folders( index, {"area": Memory.Area.INSTRUMENTS.value}, filename_pattern="**/*.md", + recursive=True, ) return index @@ -418,7 +436,7 @@ def _generate_doc_id(self): @staticmethod def _save_db_file(db: MyFaiss, memory_subdir: str): - abs_dir = Memory._abs_db_dir(memory_subdir) + abs_dir = abs_db_dir(memory_subdir) db.save_local(folder_path=abs_dir) @staticmethod @@ -446,10 +464,6 @@ def _cosine_normalizer(val: float) -> float: ) # float precision can cause values like 1.0000000596046448 return res - @staticmethod - def 
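With this change, files in the root of a knowledge subdir are preloaded (non-recursively) into the MAIN area, while each `Memory.Area`-named subfolder is imported recursively into its own area. A small sketch of the resulting directory-to-area mapping, assuming the standard `default` knowledge subdir:

```python
from python.helpers import memory
from python.helpers.memory import Memory

kn_dir = "default"
print(memory.abs_knowledge_dir(kn_dir))  # .../knowledge/default -> root files go to MAIN
for area in Memory.Area:
    # each area-named subfolder is loaded recursively into that area
    print(area.value, "->", memory.abs_knowledge_dir(kn_dir, area.value))
```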
_abs_db_dir(memory_subdir: str) -> str: - return files.get_abs_path("memory", memory_subdir) - @staticmethod def format_docs_plain(docs: list[Document]) -> list[str]: result = [] @@ -466,10 +480,6 @@ def get_timestamp(): return datetime.now().strftime("%Y-%m-%d %H:%M:%S") -def get_memory_subdir_abs(agent: Agent) -> str: - return files.get_abs_path("memory", agent.config.memory_subdir or "default") - - def get_custom_knowledge_subdir_abs(agent: Agent) -> str: for dir in agent.config.knowledge_subdirs: if dir != "default": @@ -480,3 +490,86 @@ def get_custom_knowledge_subdir_abs(agent: Agent) -> str: def reload(): # clear the memory index, this will force all DBs to reload Memory.index = {} + + +def abs_db_dir(memory_subdir: str) -> str: + # patch for projects, this way we don't need to re-work the structure of memory subdirs + if memory_subdir.startswith("projects/"): + from python.helpers.projects import get_project_meta_folder + + return files.get_abs_path(get_project_meta_folder(memory_subdir[9:]), "memory") + # standard subdirs + return files.get_abs_path("memory", memory_subdir) + + +def abs_knowledge_dir(knowledge_subdir: str, *sub_dirs: str) -> str: + # patch for projects, this way we don't need to re-work the structure of knowledge subdirs + if knowledge_subdir.startswith("projects/"): + from python.helpers.projects import get_project_meta_folder + + return files.get_abs_path( + get_project_meta_folder(knowledge_subdir[9:]), "knowledge", *sub_dirs + ) + # standard subdirs + return files.get_abs_path("knowledge", knowledge_subdir, *sub_dirs) + + +def get_memory_subdir_abs(agent: Agent) -> str: + subdir = get_agent_memory_subdir(agent) + return abs_db_dir(subdir) + + +def get_agent_memory_subdir(agent: Agent) -> str: + # if project is active, use project memory subdir + return get_context_memory_subdir(agent.context) + + +def get_context_memory_subdir(context: AgentContext) -> str: + # if project is active, use project memory subdir + from python.helpers.projects import ( + get_context_memory_subdir as get_project_memory_subdir, + ) + + memory_subdir = get_project_memory_subdir(context) + if memory_subdir: + return memory_subdir + + # no project, regular memory subdir + return context.config.memory_subdir or "default" + + +def get_existing_memory_subdirs() -> list[str]: + try: + from python.helpers.projects import ( + get_project_meta_folder, + get_projects_parent_folder, + ) + + # Get subdirectories from memory folder + subdirs = files.get_subdirectories("memory", exclude="embeddings") + + project_subdirs = files.get_subdirectories(get_projects_parent_folder()) + for project_subdir in project_subdirs: + if files.exists( + get_project_meta_folder(project_subdir), "memory", "index.faiss" + ): + subdirs.append(f"projects/{project_subdir}") + + # Ensure 'default' is always available + if "default" not in subdirs: + subdirs.insert(0, "default") + + return subdirs + except Exception as e: + PrintStyle.error(f"Failed to get memory subdirectories: {str(e)}") + return ["default"] + + +def get_knowledge_subdirs_by_memory_subdir( + memory_subdir: str, default: list[str] +) -> list[str]: + if memory_subdir.startswith("projects/"): + from python.helpers.projects import get_project_meta_folder + + default.append(get_project_meta_folder(memory_subdir[9:], "knowledge")) + return default diff --git a/python/helpers/model_discovery.py b/python/helpers/model_discovery.py new file mode 100644 index 0000000000..478d2de9f9 --- /dev/null +++ b/python/helpers/model_discovery.py @@ -0,0 +1,439 @@ +""" 
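`abs_db_dir` (and `abs_knowledge_dir`) special-case subdirs that start with `projects/`, redirecting them into the project's `.a0proj` meta folder instead of the global `memory/` tree. A quick sketch; `acme` is an example project name:

```python
from python.helpers import memory

print(memory.abs_db_dir("default"))
# -> <base>/memory/default
print(memory.abs_db_dir("projects/acme"))
# -> <base>/usr/projects/acme/.a0proj/memory
print(memory.abs_knowledge_dir("projects/acme", "main"))
# -> <base>/usr/projects/acme/.a0proj/knowledge/main
```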
+Dynamic Model Discovery Service for Agent Zero + +Fetches available models DIRECTLY from each provider's API based on +which API keys the user has configured. No hardcoded model lists. + +Supported Providers: +- OpenAI: GET https://api.openai.com/v1/models +- Anthropic: GET https://api.anthropic.com/v1/models +- Google Gemini: GET https://generativelanguage.googleapis.com/v1beta/models +- Groq: GET https://api.groq.com/openai/v1/models +- Mistral: GET https://api.mistral.ai/v1/models +- DeepSeek: GET https://api.deepseek.com/models +- xAI: GET https://api.x.ai/v1/models +- OpenRouter: GET https://openrouter.ai/api/v1/models +- SambaNova: GET https://api.sambanova.ai/v1/models +- And any OpenAI-compatible provider with api_base set +""" + +import json +import os +import time +from typing import Any + +from python.helpers import files +from python.helpers.print_style import PrintStyle +from python.helpers.providers import FieldOption + +# Cache configuration +CACHE_FILE = "tmp/model_cache.json" +CACHE_TTL_SECONDS = 1 * 60 * 60 # 1 hour (more frequent than before since we want fresh data) + +# Provider API endpoints +PROVIDER_ENDPOINTS = { + "openai": { + "url": "https://api.openai.com/v1/models", + "auth_type": "bearer", + }, + "anthropic": { + "url": "https://api.anthropic.com/v1/models", + "auth_type": "anthropic", + }, + "google": { + "url": "https://generativelanguage.googleapis.com/v1beta/models", + "auth_type": "query_key", + }, + "groq": { + "url": "https://api.groq.com/openai/v1/models", + "auth_type": "bearer", + }, + "mistral": { + "url": "https://api.mistral.ai/v1/models", + "auth_type": "bearer", + }, + "deepseek": { + "url": "https://api.deepseek.com/models", + "auth_type": "bearer", + }, + "xai": { + "url": "https://api.x.ai/v1/models", + "auth_type": "bearer", + }, + "openrouter": { + "url": "https://openrouter.ai/api/v1/models", + "auth_type": "bearer", + "extra_headers": { + "HTTP-Referer": "https://agent-zero.ai/", + "X-Title": "Agent Zero", + }, + }, + "sambanova": { + "url": "https://api.sambanova.ai/v1/models", + "auth_type": "bearer", + }, +} + +# Providers that are OpenAI-compatible and can use custom api_base +OPENAI_COMPATIBLE_PROVIDERS = { + "lm_studio", + "ollama", + "venice", + "a0_venice", + "azure", + "other", + "zai", + "zai_coding", +} + + +def _load_cache() -> dict[str, Any] | None: + """Load cached model data if valid.""" + cache_path = files.get_abs_path(CACHE_FILE) + if not os.path.exists(cache_path): + return None + + try: + with open(cache_path, "r", encoding="utf-8") as f: + cache = json.load(f) + + # Check TTL + cached_at = cache.get("cached_at", 0) + if (time.time() - cached_at) > CACHE_TTL_SECONDS: + return None + + return cache + except (json.JSONDecodeError, IOError) as e: + PrintStyle.warning(f"Failed to load model cache: {e}") + return None + + +def _save_cache(data: dict[str, Any]): + """Save model data to cache.""" + cache_path = files.get_abs_path(CACHE_FILE) + try: + os.makedirs(os.path.dirname(cache_path), exist_ok=True) + data["cached_at"] = time.time() + with open(cache_path, "w", encoding="utf-8") as f: + json.dump(data, f, indent=2) + except IOError as e: + PrintStyle.warning(f"Failed to save model cache: {e}") + + +def _get_cached_models(provider: str, model_type: str) -> list[dict[str, str]] | None: + """Get cached models for a provider if available.""" + cache = _load_cache() + if cache: + key = f"{provider}_{model_type}" + return cache.get("providers", {}).get(key) + return None + + +def _cache_models(provider: str, model_type: str, 
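`_save_cache` and `_cache_models` persist discovered models under `tmp/model_cache.json`, keyed per provider and model type and guarded by `cached_at` against `CACHE_TTL_SECONDS`. A sketch of the cache shape with illustrative model entries:

```python
# Approximate shape of tmp/model_cache.json (values are illustrative):
example_cache = {
    "cached_at": 1736900000.0,  # checked against CACHE_TTL_SECONDS on load
    "providers": {
        # key = f"{provider}_{model_type}"
        "openai_chat": [
            {"id": "gpt-4o", "name": "gpt-4o"},
        ],
        "openai_embedding": [
            {"id": "text-embedding-3-small", "name": "text-embedding-3-small"},
        ],
    },
}
```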
models: list[dict[str, str]]): + """Cache models for a provider.""" + cache = _load_cache() or {"providers": {}} + if "providers" not in cache: + cache["providers"] = {} + key = f"{provider}_{model_type}" + cache["providers"][key] = models + _save_cache(cache) + + +def _filter_models_by_type( + models: list[dict[str, str]], model_type: str, provider: str +) -> list[dict[str, str]]: + """Filter models based on type (chat vs embedding).""" + if model_type == "embedding": + # Look for embedding models + embedding_keywords = ["embed", "embedding", "text-embedding"] + return [ + m for m in models + if any(kw in m["id"].lower() for kw in embedding_keywords) + ] + else: + # For chat, exclude embedding, whisper, tts, dall-e, moderation models + exclude_keywords = [ + "embed", "whisper", "tts", "dall-e", "davinci", "babbage", + "moderation", "curie", "ada-", "text-ada", "text-babbage", + "text-curie", "text-davinci", "code-", "audio" + ] + # For OpenRouter, include all since they're all chat models + if provider == "openrouter": + return models + return [ + m for m in models + if not any(kw in m["id"].lower() for kw in exclude_keywords) + ] + + +async def _fetch_models_openai_compatible( + api_key: str, + base_url: str, + extra_headers: dict[str, str] | None = None, +) -> list[dict[str, str]]: + """Fetch models from any OpenAI-compatible API.""" + import httpx + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + if extra_headers: + headers.update(extra_headers) + + url = f"{base_url.rstrip('/')}/models" + + try: + async with httpx.AsyncClient(timeout=15.0) as client: + response = await client.get(url, headers=headers) + + if response.status_code != 200: + PrintStyle.warning(f"API returned status {response.status_code} from {url}") + return [] + + data = response.json() + models_data = data.get("data", []) + + models = [] + for m in models_data: + model_id = m.get("id", "") + if model_id: + # Use id as name, or use name field if available + name = m.get("name") or model_id + models.append({"id": model_id, "name": name}) + + return models + + except httpx.HTTPError as e: + PrintStyle.warning(f"Failed to fetch models from {url}: {e}") + return [] + except Exception as e: + PrintStyle.error(f"Unexpected error fetching models from {url}: {e}") + return [] + + +async def _fetch_models_anthropic(api_key: str) -> list[dict[str, str]]: + """Fetch models from Anthropic API.""" + import httpx + + headers = { + "x-api-key": api_key, + "anthropic-version": "2023-06-01", + "Content-Type": "application/json", + } + + try: + async with httpx.AsyncClient(timeout=15.0) as client: + response = await client.get( + "https://api.anthropic.com/v1/models", + headers=headers, + ) + + if response.status_code != 200: + PrintStyle.warning(f"Anthropic API returned status {response.status_code}") + return [] + + data = response.json() + models_data = data.get("data", []) + + models = [] + for m in models_data: + model_id = m.get("id", "") + if model_id: + display_name = m.get("display_name") or model_id + models.append({"id": model_id, "name": display_name}) + + return models + + except httpx.HTTPError as e: + PrintStyle.warning(f"Failed to fetch Anthropic models: {e}") + return [] + except Exception as e: + PrintStyle.error(f"Unexpected error fetching Anthropic models: {e}") + return [] + + +async def _fetch_models_google(api_key: str) -> list[dict[str, str]]: + """Fetch models from Google Gemini API.""" + import httpx + + try: + async with httpx.AsyncClient(timeout=15.0) as 
client: + response = await client.get( + f"https://generativelanguage.googleapis.com/v1beta/models?key={api_key}", + ) + + if response.status_code != 200: + PrintStyle.warning(f"Google API returned status {response.status_code}") + return [] + + data = response.json() + models_data = data.get("models", []) + + models = [] + for m in models_data: + # Google returns names like "models/gemini-pro" + full_name = m.get("name", "") + model_id = full_name.replace("models/", "") if full_name.startswith("models/") else full_name + if model_id: + display_name = m.get("displayName") or model_id + models.append({"id": model_id, "name": display_name}) + + return models + + except httpx.HTTPError as e: + PrintStyle.warning(f"Failed to fetch Google models: {e}") + return [] + except Exception as e: + PrintStyle.error(f"Unexpected error fetching Google models: {e}") + return [] + + +async def _fetch_models_for_provider( + provider: str, + api_key: str, + api_base: str | None = None, +) -> list[dict[str, str]]: + """Fetch models from a specific provider.""" + if not api_key or api_key == "None" or api_key == "": + return [] + + # Handle Anthropic separately (different auth) + if provider == "anthropic": + return await _fetch_models_anthropic(api_key) + + # Handle Google separately (query param auth) + if provider == "google": + return await _fetch_models_google(api_key) + + # Handle known providers with predefined endpoints + if provider in PROVIDER_ENDPOINTS: + endpoint_config = PROVIDER_ENDPOINTS[provider] + return await _fetch_models_openai_compatible( + api_key=api_key, + base_url=endpoint_config["url"].rsplit("/models", 1)[0], + extra_headers=endpoint_config.get("extra_headers"), + ) + + # Handle OpenAI-compatible providers with custom api_base + if provider in OPENAI_COMPATIBLE_PROVIDERS and api_base: + return await _fetch_models_openai_compatible( + api_key=api_key, + base_url=api_base, + ) + + return [] + + +async def get_models_for_provider( + model_type: str, + provider: str, + api_keys: dict[str, str] | None = None, + api_base: str | None = None, + force_refresh: bool = False, +) -> list[FieldOption]: + """ + Get available models for a provider by fetching from their API. 
+ + Args: + model_type: Either 'chat' or 'embedding' + provider: Provider ID (e.g., 'openai', 'anthropic', 'openrouter') + api_keys: Dictionary of API keys keyed by provider name + api_base: Optional custom API base URL for OpenAI-compatible providers + force_refresh: If True, bypass cache + + Returns: + List of FieldOption dicts with 'value' and 'label' keys + """ + if api_keys is None: + api_keys = {} + + # Get API key for this provider + api_key = api_keys.get(provider, "") + + # Check cache first (unless force refresh) + if not force_refresh: + cached = _get_cached_models(provider, model_type) + if cached: + return _convert_to_options(cached) + + # Fetch from provider API + models = await _fetch_models_for_provider(provider, api_key, api_base) + + if models: + # Filter by model type + models = _filter_models_by_type(models, model_type, provider) + + # Sort by name + models.sort(key=lambda x: x["name"].lower()) + + # Cache the results + _cache_models(provider, model_type, models) + + return _convert_to_options(models) + + +def _convert_to_options(models: list[dict[str, str]]) -> list[FieldOption]: + """Convert model list to FieldOption format.""" + options: list[FieldOption] = [] + + for m in models: + options.append({ + "value": m["id"], + "label": m["name"], + }) + + # Always add custom option at the end + options.append({ + "value": "__custom__", + "label": "Custom (enter manually)", + }) + + return options + + +def get_models_for_provider_sync( + model_type: str, + provider: str, + api_keys: dict[str, str] | None = None, +) -> list[FieldOption]: + """ + Synchronous version - returns cached models or empty list with custom option. + Used for initial settings load; async refresh happens on provider change. + """ + if api_keys is None: + api_keys = {} + + # Check cache + cached = _get_cached_models(provider, model_type) + if cached: + return _convert_to_options(cached) + + # No cache available - return just the custom option + # The frontend will trigger an async refresh when the modal opens + return [{ + "value": "__custom__", + "label": "Custom (enter manually)", + }] + + +def clear_cache(): + """Clear the model cache to force refresh on next request.""" + cache_path = files.get_abs_path(CACHE_FILE) + if os.path.exists(cache_path): + try: + os.remove(cache_path) + PrintStyle.info("Model cache cleared") + except IOError as e: + PrintStyle.warning(f"Failed to clear model cache: {e}") + + +def clear_provider_cache(provider: str, model_type: str = "chat"): + """Clear cache for a specific provider.""" + cache = _load_cache() + if cache and "providers" in cache: + key = f"{provider}_{model_type}" + if key in cache["providers"]: + del cache["providers"][key] + _save_cache(cache) + PrintStyle.info(f"Cleared cache for {provider}/{model_type}") diff --git a/python/helpers/persist_chat.py b/python/helpers/persist_chat.py index 290bf9c876..9e4c20b6c7 100644 --- a/python/helpers/persist_chat.py +++ b/python/helpers/persist_chat.py @@ -123,6 +123,10 @@ def _serialize_context(context: AgentContext): agents.append(_serialize_agent(agent)) agent = agent.data.get(Agent.DATA_NAME_SUBORDINATE, None) + + data = {k: v for k, v in context.data.items() if not k.startswith("_")} + output_data = {k: v for k, v in context.output_data.items() if not k.startswith("_")} + return { "id": context.id, "name": context.name, @@ -142,6 +146,8 @@ def _serialize_context(context: AgentContext): context.streaming_agent.number if context.streaming_agent else 0 ), "log": _serialize_log(context.log), + "data": data, + 
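Putting the discovery pieces together: `get_models_for_provider` fetches (or serves cached) models for one provider, filters them by type, and always appends the `__custom__` option. A usage sketch; the API key value is a placeholder:

```python
import asyncio

from python.helpers import model_discovery


async def main() -> None:
    options = await model_discovery.get_models_for_provider(
        model_type="chat",
        provider="openrouter",
        api_keys={"openrouter": "sk-or-..."},  # placeholder key; empty key -> only "__custom__"
        force_refresh=True,
    )
    for opt in options:
        print(opt["value"], "-", opt["label"])  # last entry is always "__custom__"


asyncio.run(main())
```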
"output_data": output_data, } @@ -190,6 +196,8 @@ def _deserialize_context(data): ), log=log, paused=False, + data=data.get("data", {}), + output_data=data.get("output_data", {}), # agent0=agent0, # streaming_agent=straming_agent, ) @@ -254,17 +262,19 @@ def _deserialize_log(data: dict[str, Any]) -> "Log": # Deserialize the list of LogItem objects i = 0 for item_data in data.get("logs", []): - log.logs.append( - LogItem( - log=log, # restore the log reference - no=i, # item_data["no"], - type=item_data["type"], - heading=item_data.get("heading", ""), - content=item_data.get("content", ""), - kvps=OrderedDict(item_data["kvps"]) if item_data["kvps"] else None, - temp=item_data.get("temp", False), - ) - ) + log.logs.append(LogItem( + log=log, # restore the log reference + no=i, # item_data["no"], + type=item_data["type"], + heading=item_data.get("heading", ""), + content=item_data.get("content", ""), + kvps=OrderedDict(item_data["kvps"]) if item_data["kvps"] else None, + temp=item_data.get("temp", False), + # Pass metrics directly to constructor + timestamp=item_data.get("timestamp", 0.0), + duration_ms=item_data.get("duration_ms"), + agent_number=item_data.get("agent_number", 0), + )) log.updates.append(i) i += 1 diff --git a/python/helpers/playwright.py b/python/helpers/playwright.py index 34f851ab63..c352407b2b 100644 --- a/python/helpers/playwright.py +++ b/python/helpers/playwright.py @@ -1,4 +1,6 @@ +import os +import sys from pathlib import Path import subprocess from python.helpers import files @@ -9,8 +11,14 @@ def get_playwright_binary(): pw_cache = Path(get_playwright_cache_dir()) - headless_shell = next(pw_cache.glob("chromium_headless_shell-*/chrome-*/headless_shell"), None) - return headless_shell + for pattern in ( + "chromium_headless_shell-*/chrome-*/headless_shell", + "chromium_headless_shell-*/chrome-*/headless_shell.exe", + ): + binary = next(pw_cache.glob(pattern), None) + if binary: + return binary + return None def get_playwright_cache_dir(): return files.get_abs_path("tmp/playwright") @@ -19,7 +27,6 @@ def ensure_playwright_binary(): bin = get_playwright_binary() if not bin: cache = get_playwright_cache_dir() - import os env = os.environ.copy() env["PLAYWRIGHT_BROWSERS_PATH"] = cache subprocess.check_call( diff --git a/python/helpers/print_style.py b/python/helpers/print_style.py index 6d6e5f4f58..188697c866 100644 --- a/python/helpers/print_style.py +++ b/python/helpers/print_style.py @@ -95,9 +95,10 @@ def get(self, *args, sep=' ', **kwargs): # Automatically mask secrets in all print output try: - from python.helpers.secrets import SecretsManager - secrets_mgr = SecretsManager.get_instance() - text = secrets_mgr.mask_values(text) + if not hasattr(self, "secrets_mgr"): + from python.helpers.secrets import get_secrets_manager + self.secrets_mgr = get_secrets_manager() + text = self.secrets_mgr.mask_values(text) except Exception: # If masking fails, proceed without masking to avoid breaking functionality pass diff --git a/python/helpers/projects.py b/python/helpers/projects.py new file mode 100644 index 0000000000..6e25738c6e --- /dev/null +++ b/python/helpers/projects.py @@ -0,0 +1,429 @@ +import os +from typing import Literal, TypedDict, TYPE_CHECKING + +from python.helpers import files, dirty_json, persist_chat, file_tree +from python.helpers.print_style import PrintStyle + + +if TYPE_CHECKING: + from agent import AgentContext + +PROJECTS_PARENT_DIR = "usr/projects" +PROJECT_META_DIR = ".a0proj" +PROJECT_INSTRUCTIONS_DIR = "instructions" +PROJECT_KNOWLEDGE_DIR = 
"knowledge" +PROJECT_HEADER_FILE = "project.json" + +CONTEXT_DATA_KEY_PROJECT = "project" + + +class FileStructureInjectionSettings(TypedDict): + enabled: bool + max_depth: int + max_files: int + max_folders: int + max_lines: int + gitignore: str + +class SubAgentSettings(TypedDict): + enabled: bool + +class BasicProjectData(TypedDict): + title: str + description: str + instructions: str + color: str + memory: Literal[ + "own", "global" + ] # in the future we can add cutom and point to another existing folder + file_structure: FileStructureInjectionSettings + +class EditProjectData(BasicProjectData): + name: str + instruction_files_count: int + knowledge_files_count: int + variables: str + secrets: str + subagents: dict[str, SubAgentSettings] + + + +def get_projects_parent_folder(): + return files.get_abs_path(PROJECTS_PARENT_DIR) + + +def get_project_folder(name: str): + return files.get_abs_path(get_projects_parent_folder(), name) + + +def get_project_meta_folder(name: str, *sub_dirs: str): + return files.get_abs_path(get_project_folder(name), PROJECT_META_DIR, *sub_dirs) + + +def delete_project(name: str): + abs_path = files.get_abs_path(PROJECTS_PARENT_DIR, name) + files.delete_dir(abs_path) + deactivate_project_in_chats(name) + return name + + +def create_project(name: str, data: BasicProjectData): + abs_path = files.create_dir_safe( + files.get_abs_path(PROJECTS_PARENT_DIR, name), rename_format="{name}_{number}" + ) + create_project_meta_folders(name) + data = _normalizeBasicData(data) + save_project_header(name, data) + return name + + +def load_project_header(name: str): + abs_path = files.get_abs_path( + PROJECTS_PARENT_DIR, name, PROJECT_META_DIR, PROJECT_HEADER_FILE + ) + header: dict = dirty_json.parse(files.read_file(abs_path)) # type: ignore + header["name"] = name + return header + + +def _default_file_structure_settings(): + try: + gitignore = files.read_file("conf/projects.default.gitignore") + except Exception: + gitignore = "" + return FileStructureInjectionSettings( + enabled=True, + max_depth=5, + max_files=20, + max_folders=20, + max_lines=250, + gitignore=gitignore, + ) + + +def _normalizeBasicData(data: BasicProjectData): + return BasicProjectData( + title=data.get("title", ""), + description=data.get("description", ""), + instructions=data.get("instructions", ""), + color=data.get("color", ""), + memory=data.get("memory", "own"), + file_structure=data.get( + "file_structure", + _default_file_structure_settings(), + ), + ) + + +def _normalizeEditData(data: EditProjectData): + return EditProjectData( + name=data.get("name", ""), + title=data.get("title", ""), + description=data.get("description", ""), + instructions=data.get("instructions", ""), + variables=data.get("variables", ""), + color=data.get("color", ""), + instruction_files_count=data.get("instruction_files_count", 0), + knowledge_files_count=data.get("knowledge_files_count", 0), + secrets=data.get("secrets", ""), + memory=data.get("memory", "own"), + file_structure=data.get( + "file_structure", + _default_file_structure_settings(), + ), + subagents=data.get("subagents", {}), + ) + + +def _edit_data_to_basic_data(data: EditProjectData): + return _normalizeBasicData(data) + + +def _basic_data_to_edit_data(data: BasicProjectData): + return _normalizeEditData(data) # type: ignore + + +def update_project(name: str, data: EditProjectData): + # merge with current state + current = load_edit_project_data(name) + current.update(data) + current = _normalizeEditData(current) + + # save header data + header = 
_edit_data_to_basic_data(current) + save_project_header(name, header) + + # save secrets + save_project_variables(name, current["variables"]) + save_project_secrets(name, current["secrets"]) + save_project_subagents(name, current["subagents"]) + + reactivate_project_in_chats(name) + return name + + +def load_basic_project_data(name: str) -> BasicProjectData: + data = BasicProjectData(**load_project_header(name)) + normalized = _normalizeBasicData(data) + return normalized + + +def load_edit_project_data(name: str) -> EditProjectData: + data = load_basic_project_data(name) + additional_instructions = get_additional_instructions_files( + name + ) # for additional info + variables = load_project_variables(name) + secrets = load_project_secrets_masked(name) + subagents = load_project_subagents(name) + knowledge_files_count = get_knowledge_files_count(name) + data = EditProjectData( + **data, + name=name, + instruction_files_count=len(additional_instructions), + knowledge_files_count=knowledge_files_count, + variables=variables, + secrets=secrets, + subagents=subagents, + ) + data = _normalizeEditData(data) + return data + + +def save_project_header(name: str, data: BasicProjectData): + # save project header file + header = dirty_json.stringify(data) + abs_path = files.get_abs_path( + PROJECTS_PARENT_DIR, name, PROJECT_META_DIR, PROJECT_HEADER_FILE + ) + + files.write_file(abs_path, header) + + +def get_active_projects_list(): + return _get_projects_list(get_projects_parent_folder()) + + +def _get_projects_list(parent_dir): + projects = [] + + # folders in project directory + for name in os.listdir(parent_dir): + try: + abs_path = os.path.join(parent_dir, name) + if os.path.isdir(abs_path): + project_data = load_basic_project_data(name) + projects.append( + { + "name": name, + "title": project_data.get("title", ""), + "description": project_data.get("description", ""), + "color": project_data.get("color", ""), + } + ) + except Exception as e: + PrintStyle.error(f"Error loading project {name}: {str(e)}") + + # sort projects by name + projects.sort(key=lambda x: x["name"]) + return projects + + +def activate_project(context_id: str, name: str): + from agent import AgentContext + + data = load_edit_project_data(name) + context = AgentContext.get(context_id) + if context is None: + raise Exception("Context not found") + display_name = str(data.get("title", name)) + display_name = display_name[:22] + "..." 
if len(display_name) > 25 else display_name + context.set_data(CONTEXT_DATA_KEY_PROJECT, name) + context.set_output_data( + CONTEXT_DATA_KEY_PROJECT, + {"name": name, "title": display_name, "color": data.get("color", "")}, + ) + + # persist + persist_chat.save_tmp_chat(context) + + +def deactivate_project(context_id: str): + from agent import AgentContext + + context = AgentContext.get(context_id) + if context is None: + raise Exception("Context not found") + context.set_data(CONTEXT_DATA_KEY_PROJECT, None) + context.set_output_data(CONTEXT_DATA_KEY_PROJECT, None) + + # persist + persist_chat.save_tmp_chat(context) + + +def reactivate_project_in_chats(name: str): + from agent import AgentContext + + for context in AgentContext.all(): + if context.get_data(CONTEXT_DATA_KEY_PROJECT) == name: + activate_project(context.id, name) + persist_chat.save_tmp_chat(context) + + +def deactivate_project_in_chats(name: str): + from agent import AgentContext + + for context in AgentContext.all(): + if context.get_data(CONTEXT_DATA_KEY_PROJECT) == name: + deactivate_project(context.id) + persist_chat.save_tmp_chat(context) + + +def build_system_prompt_vars(name: str): + project_data = load_basic_project_data(name) + main_instructions = project_data.get("instructions", "") or "" + additional_instructions = get_additional_instructions_files(name) + complete_instructions = ( + main_instructions + + "\n\n".join( + additional_instructions[k] for k in sorted(additional_instructions) + ) + ).strip() + return { + "project_name": project_data.get("title", ""), + "project_description": project_data.get("description", ""), + "project_instructions": complete_instructions or "", + "project_path": files.normalize_a0_path(get_project_folder(name)), + } + + +def get_additional_instructions_files(name: str): + instructions_folder = files.get_abs_path( + get_project_folder(name), PROJECT_META_DIR, PROJECT_INSTRUCTIONS_DIR + ) + return files.read_text_files_in_dir(instructions_folder) + + +def get_context_project_name(context: "AgentContext") -> str | None: + return context.get_data(CONTEXT_DATA_KEY_PROJECT) + + +def load_project_variables(name: str): + try: + abs_path = files.get_abs_path(get_project_meta_folder(name), "variables.env") + return files.read_file(abs_path) + except Exception: + return "" + + +def save_project_variables(name: str, variables: str): + abs_path = files.get_abs_path(get_project_meta_folder(name), "variables.env") + files.write_file(abs_path, variables) + + +def load_project_subagents(name: str) -> dict[str, SubAgentSettings]: + try: + abs_path = files.get_abs_path(get_project_meta_folder(name), "agents.json") + data = dirty_json.parse(files.read_file(abs_path)) + if isinstance(data, dict): + return _normalize_subagents(data) # type: ignore[arg-type,return-value] + return {} + except Exception: + return {} + + +def save_project_subagents(name: str, subagents_data: dict[str, SubAgentSettings]): + abs_path = files.get_abs_path(get_project_meta_folder(name), "agents.json") + normalized = _normalize_subagents(subagents_data) + content = dirty_json.stringify(normalized) + files.write_file(abs_path, content) + + +def _normalize_subagents( + subagents_data: dict[str, SubAgentSettings] +) -> dict[str, SubAgentSettings]: + from python.helpers import subagents + + agents_dict = subagents.get_agents_dict() + + normalized: dict[str, SubAgentSettings] = {} + for key, value in subagents_data.items(): + agent = agents_dict.get(key) + if not agent: + continue + + enabled = bool(value["enabled"]) + if agent.enabled 
== enabled: + continue + + normalized[key] = {"enabled": enabled} + + return normalized + + +def load_project_secrets_masked(name: str, merge_with_global=False): + from python.helpers import secrets + + mgr = secrets.get_project_secrets_manager(name, merge_with_global) + return mgr.get_masked_secrets() + + +def save_project_secrets(name: str, secrets: str): + from python.helpers.secrets import get_project_secrets_manager + + secrets_manager = get_project_secrets_manager(name) + secrets_manager.save_secrets_with_merge(secrets) + + +def get_context_memory_subdir(context: "AgentContext") -> str | None: + # if a project is active and has memory isolation set, return the project memory subdir + project_name = get_context_project_name(context) + if project_name: + project_data = load_basic_project_data(project_name) + if project_data["memory"] == "own": + return "projects/" + project_name + return None # no memory override + + +def create_project_meta_folders(name: str): + # create instructions folder + files.create_dir(get_project_meta_folder(name, PROJECT_INSTRUCTIONS_DIR)) + + # create knowledge folders + files.create_dir(get_project_meta_folder(name, PROJECT_KNOWLEDGE_DIR)) + from python.helpers import memory + + for memory_type in memory.Memory.Area: + files.create_dir( + get_project_meta_folder(name, PROJECT_KNOWLEDGE_DIR, memory_type.value) + ) + + +def get_knowledge_files_count(name: str): + knowledge_folder = files.get_abs_path( + get_project_meta_folder(name, PROJECT_KNOWLEDGE_DIR) + ) + return len(files.list_files_in_dir_recursively(knowledge_folder)) + +def get_file_structure(name: str, basic_data: BasicProjectData|None=None) -> str: + project_folder = get_project_folder(name) + if basic_data is None: + basic_data = load_basic_project_data(name) + + tree = str(file_tree.file_tree( + project_folder, + max_depth=basic_data["file_structure"]["max_depth"], + max_files=basic_data["file_structure"]["max_files"], + max_folders=basic_data["file_structure"]["max_folders"], + max_lines=basic_data["file_structure"]["max_lines"], + ignore=basic_data["file_structure"]["gitignore"], + output_mode=file_tree.OUTPUT_MODE_STRING + )) + + # empty? 
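# --- Illustration only (not part of the helper module above) ---------------
# A minimal sketch of how the project helpers defined in this file could fit
# together for a hypothetical project named "demo"; the field values and the
# use of the current chat context are assumptions, and error handling is
# omitted.
from agent import AgentContext

project = create_project("demo", {
    "title": "Demo project",
    "description": "Scratch workspace",
    "instructions": "Prefer Python for all scripts.",
    "color": "#4a90d9",
    "memory": "own",                                  # isolated project memory subdir
    "file_structure": _default_file_structure_settings(),
})

ctx = AgentContext.current()
if ctx:
    activate_project(ctx.id, project)                 # stored on the chat context
    prompt_vars = build_system_prompt_vars(project)   # merged into the system prompt
    tree = get_file_structure(project)                # folder tree injected as context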
+ if "\n" not in tree: + tree += "\n # Empty" + + return tree + + \ No newline at end of file diff --git a/python/helpers/providers.py b/python/helpers/providers.py index cd139e88aa..f60238bd56 100644 --- a/python/helpers/providers.py +++ b/python/helpers/providers.py @@ -1,7 +1,8 @@ import yaml from python.helpers import files -from typing import List, Dict, Optional, TypedDict +from typing import List, Dict, Optional, TypedDict, Literal +ModelType = Literal["chat", "embedding"] # Type alias for UI option items class FieldOption(TypedDict): @@ -68,16 +69,15 @@ def _load_providers(self): opts.append({"value": pid, "label": name}) self._options[p_type] = opts - def get_providers(self, provider_type: str) -> List[FieldOption]: + def get_providers(self, provider_type: ModelType) -> List[FieldOption]: """Returns a list of providers for a given type (e.g., 'chat', 'embedding').""" return self._options.get(provider_type, []) if self._options else [] - - def get_raw_providers(self, provider_type: str) -> List[Dict[str, str]]: + def get_raw_providers(self, provider_type: ModelType) -> List[Dict[str, str]]: """Return raw provider dictionaries for advanced use-cases.""" return self._raw.get(provider_type, []) if self._raw else [] - def get_provider_config(self, provider_type: str, provider_id: str) -> Optional[Dict[str, str]]: + def get_provider_config(self, provider_type: ModelType, provider_id: str) -> Optional[Dict[str, str]]: """Return the metadata dict for a single provider id (case-insensitive).""" provider_id_low = provider_id.lower() for p in self.get_raw_providers(provider_type): @@ -86,16 +86,16 @@ def get_provider_config(self, provider_type: str, provider_id: str) -> Optional[ return None -def get_providers(provider_type: str) -> List[FieldOption]: +def get_providers(provider_type: ModelType) -> List[FieldOption]: """Convenience function to get providers of a specific type.""" return ProviderManager.get_instance().get_providers(provider_type) -def get_raw_providers(provider_type: str) -> List[Dict[str, str]]: +def get_raw_providers(provider_type: ModelType) -> List[Dict[str, str]]: """Return full metadata for providers of a given type.""" return ProviderManager.get_instance().get_raw_providers(provider_type) -def get_provider_config(provider_type: str, provider_id: str) -> Optional[Dict[str, str]]: +def get_provider_config(provider_type: ModelType, provider_id: str) -> Optional[Dict[str, str]]: """Return metadata for a single provider (None if not found).""" - return ProviderManager.get_instance().get_provider_config(provider_type, provider_id) \ No newline at end of file + return ProviderManager.get_instance().get_provider_config(provider_type, provider_id) \ No newline at end of file diff --git a/python/helpers/runtime.py b/python/helpers/runtime.py index d36a9958c0..2120b36cf4 100644 --- a/python/helpers/runtime.py +++ b/python/helpers/runtime.py @@ -1,14 +1,16 @@ import argparse import inspect import secrets +from pathlib import Path from typing import TypeVar, Callable, Awaitable, Union, overload, cast from python.helpers import dotenv, rfc, settings, files import asyncio import threading import queue +import sys -T = TypeVar('T') -R = TypeVar('R') +T = TypeVar("T") +R = TypeVar("R") parser = argparse.ArgumentParser() args = {} @@ -40,31 +42,38 @@ def initialize(): key = key.lstrip("-") args[key] = value + def get_arg(name: str): global args return args.get(name, None) + def has_arg(name: str): global args return name in args + def is_dockerized() -> bool: return 
bool(get_arg("dockerized")) + def is_development() -> bool: return not is_dockerized() + def get_local_url(): if is_dockerized(): return "host.docker.internal" return "127.0.0.1" + def get_runtime_id() -> str: global runtime_id if not runtime_id: - runtime_id = secrets.token_hex(8) + runtime_id = secrets.token_hex(8) return runtime_id + def get_persistent_id() -> str: id = dotenv.get_dotenv_value("A0_PERSISTENT_RUNTIME_ID") if not id: @@ -72,17 +81,28 @@ def get_persistent_id() -> str: dotenv.save_dotenv_value("A0_PERSISTENT_RUNTIME_ID", id) return id + @overload -async def call_development_function(func: Callable[..., Awaitable[T]], *args, **kwargs) -> T: ... +async def call_development_function( + func: Callable[..., Awaitable[T]], *args, **kwargs +) -> T: ... + @overload async def call_development_function(func: Callable[..., T], *args, **kwargs) -> T: ... -async def call_development_function(func: Union[Callable[..., T], Callable[..., Awaitable[T]]], *args, **kwargs) -> T: + +async def call_development_function( + func: Union[Callable[..., T], Callable[..., Awaitable[T]]], *args, **kwargs +) -> T: if is_development(): url = _get_rfc_url() password = _get_rfc_password() - module = files.deabsolute_path(func.__code__.co_filename).replace("/", ".").removesuffix(".py") # __module__ is not reliable + # Normalize path components to build a valid Python module path across OSes + module_path = Path( + files.deabsolute_path(func.__code__.co_filename) + ).with_suffix("") + module = ".".join(module_path.parts) # __module__ is not reliable result = await rfc.call_rfc( url=url, password=password, @@ -96,7 +116,7 @@ async def call_development_function(func: Union[Callable[..., T], Callable[..., if inspect.iscoroutinefunction(func): return await func(*args, **kwargs) else: - return func(*args, **kwargs) # type: ignore + return func(*args, **kwargs) # type: ignore async def handle_rfc(rfc_call: rfc.RFCCall): @@ -114,45 +134,61 @@ def _get_rfc_url() -> str: set = settings.get_settings() url = set["rfc_url"] if not "://" in url: - url = "http://"+url + url = "http://" + url if url.endswith("/"): url = url[:-1] - url = url+":"+str(set["rfc_port_http"]) + url = url + ":" + str(set["rfc_port_http"]) url += "/rfc" return url -def call_development_function_sync(func: Union[Callable[..., T], Callable[..., Awaitable[T]]], *args, **kwargs) -> T: +def call_development_function_sync( + func: Union[Callable[..., T], Callable[..., Awaitable[T]]], *args, **kwargs +) -> T: # run async function in sync manner result_queue = queue.Queue() - + def run_in_thread(): result = asyncio.run(call_development_function(func, *args, **kwargs)) result_queue.put(result) - + thread = threading.Thread(target=run_in_thread) thread.start() thread.join(timeout=30) # wait for thread with timeout - + if thread.is_alive(): raise TimeoutError("Function call timed out after 30 seconds") - + result = result_queue.get_nowait() return cast(T, result) def get_web_ui_port(): web_ui_port = ( - get_arg("port") - or int(dotenv.get_dotenv_value("WEB_UI_PORT", 0)) - or 5000 + get_arg("port") or int(dotenv.get_dotenv_value("WEB_UI_PORT", 0)) or 5000 ) return web_ui_port + def get_tunnel_api_port(): tunnel_api_port = ( get_arg("tunnel_api_port") or int(dotenv.get_dotenv_value("TUNNEL_API_PORT", 0)) or 55520 ) - return tunnel_api_port \ No newline at end of file + return tunnel_api_port + + +def get_platform(): + return sys.platform + + +def is_windows(): + return get_platform() == "win32" + + +def get_terminal_executable(): + if is_windows(): + return 
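# --- Illustration only ------------------------------------------------------
# A standalone sketch of the module-path normalization used for RFC calls
# above: a hypothetical de-absolutized file path is turned into a dotted
# Python module path via pathlib, which splits on the platform's separator,
# so Windows back-slash paths produce the same components as POSIX paths.
from pathlib import Path

relative = "python/helpers/projects.py"                  # assumed relative path
module = ".".join(Path(relative).with_suffix("").parts)  # -> "python.helpers.projects"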
"powershell.exe" + else: + return "/bin/bash" diff --git a/python/helpers/secrets.py b/python/helpers/secrets.py index a4b58c51c1..53ee85d9b0 100644 --- a/python/helpers/secrets.py +++ b/python/helpers/secrets.py @@ -4,27 +4,32 @@ import os from io import StringIO from dataclasses import dataclass -from typing import Dict, Optional, List, Literal, Set, Callable +from typing import Dict, Optional, List, Literal, Set, Callable, Tuple, TYPE_CHECKING from dotenv.parser import parse_stream from python.helpers.errors import RepairableException from python.helpers import files +if TYPE_CHECKING: + from agent import AgentContext + # New alias-based placeholder format Β§Β§secret(KEY) ALIAS_PATTERN = r"Β§Β§secret\(([A-Za-z_][A-Za-z0-9_]*)\)" +DEFAULT_SECRETS_FILE = "tmp/secrets.env" + def alias_for_key(key: str, placeholder: str = "Β§Β§secret({key})") -> str: # Return alias string for given key in upper-case key = key.upper() return placeholder.format(key=key) + @dataclass class EnvLine: raw: str type: Literal["pair", "comment", "blank", "other"] key: Optional[str] = None value: Optional[str] = None - key_part: Optional[str] = None # original left side including whitespace up to '=' inline_comment: Optional[str] = ( None # preserves trailing inline comment including leading spaces and '#' ) @@ -118,44 +123,55 @@ def finalize(self) -> str: class SecretsManager: - SECRETS_FILE = "tmp/secrets.env" PLACEHOLDER_PATTERN = ALIAS_PATTERN MASK_VALUE = "***" - _instance: Optional["SecretsManager"] = None + _instances: Dict[Tuple[str, ...], "SecretsManager"] = {} _secrets_cache: Optional[Dict[str, str]] = None _last_raw_text: Optional[str] = None @classmethod - def get_instance(cls) -> "SecretsManager": - if cls._instance is None: - cls._instance = cls() - return cls._instance - - def __init__(self): + def get_instance(cls, *secrets_files: str) -> "SecretsManager": + if not secrets_files: + secrets_files = (DEFAULT_SECRETS_FILE,) + key = tuple(secrets_files) + if key not in cls._instances: + cls._instances[key] = cls(*secrets_files) + return cls._instances[key] + + def __init__(self, *files: str): self._lock = threading.RLock() - # instance-level override for secrets file - self._secrets_file_rel = self.SECRETS_FILE - - def set_secrets_file(self, relative_path: str): - """Override the relative secrets file location (useful for tests).""" - with self._lock: - self._secrets_file_rel = relative_path - self.clear_cache() + # instance-level list of secrets files + self._files: Tuple[str, ...] 
= tuple(files) if files else (DEFAULT_SECRETS_FILE,) + self._raw_snapshots: Dict[str, str] = {} + self._secrets_cache = None + self._last_raw_text = None def read_secrets_raw(self) -> str: """Read raw secrets file content from local filesystem (same system).""" - try: - content = files.read_file(self._secrets_file_rel) - self._last_raw_text = content - return content - except Exception: - self._last_raw_text = "" - return "" + parts: List[str] = [] + self._raw_snapshots = {} + + for path in self._files: + try: + content = files.read_file(path) + except Exception: + content = "" + + self._raw_snapshots[path] = content + parts.append(content) + + combined = "\n".join(parts) + self._last_raw_text = combined + return combined def _write_secrets_raw(self, content: str): """Write raw secrets file content to local filesystem.""" - files.write_file(self._secrets_file_rel, content) + if len(self._files) != 1: + raise RuntimeError( + "Saving secrets content is only supported for a single secrets file" + ) + files.write_file(self._files[0], content) def load_secrets(self) -> Dict[str, str]: """Load secrets from file, return key-value dict""" @@ -163,29 +179,27 @@ def load_secrets(self) -> Dict[str, str]: if self._secrets_cache is not None: return self._secrets_cache - secrets: Dict[str, str] = {} - try: - content = self.read_secrets_raw() - # keep raw snapshot for future save merge without reading again - self._last_raw_text = content - if content: - secrets = self.parse_env_content(content) - except Exception as e: - # On unexpected failure, keep empty cache rather than crash - secrets = {} - - self._secrets_cache = secrets - return secrets + combined_raw = self.read_secrets_raw() + merged_secrets = ( + self.parse_env_content(combined_raw) if combined_raw else {} + ) + + # Only track the first file's raw text for single-file setups + if len(self._files) != 1: + self._last_raw_text = None + + self._secrets_cache = merged_secrets + return merged_secrets def save_secrets(self, secrets_content: str): """Save secrets content to file and update cache""" + if len(self._files) != 1: + raise RuntimeError( + "Saving secrets is disabled when multiple files are configured" + ) with self._lock: - # Ensure write to local filesystem (UTF-8) self._write_secrets_raw(secrets_content) - # Update cache - self._secrets_cache = self.parse_env_content(secrets_content) - # Update raw snapshot - self._last_raw_text = secrets_content + self._invalidate_all_caches() def save_secrets_with_merge(self, submitted_content: str): """Merge submitted content with existing file preserving comments, order and supporting deletion. @@ -193,13 +207,19 @@ def save_secrets_with_merge(self, submitted_content: str): - Keys present in existing but omitted from submitted are deleted. - New keys with non-masked values are appended at the end. 
""" + if len(self._files) != 1: + raise RuntimeError( + "Merging secrets is disabled when multiple files are configured" + ) with self._lock: # Prefer in-memory snapshot to avoid disk reads during save + primary_path = self._files[0] if self._last_raw_text is not None: existing_text = self._last_raw_text else: try: - existing_text = self.read_secrets_raw() + existing_text = files.read_file(primary_path) + self._raw_snapshots[primary_path] = existing_text except Exception as e: # If read fails and submitted contains masked values, abort to avoid losing values/comments if self.MASK_VALUE in submitted_content: @@ -210,7 +230,8 @@ def save_secrets_with_merge(self, submitted_content: str): existing_text = "" merged_lines = self._merge_env(existing_text, submitted_content) merged_text = self._serialize_env_lines(merged_lines) - self.save_secrets(merged_text) + self._write_secrets_raw(merged_text) + self._invalidate_all_caches() def get_keys(self) -> List[str]: """Get list of secret keys""" @@ -219,7 +240,7 @@ def get_keys(self) -> List[str]: def get_secrets_for_prompt(self) -> str: """Get formatted string of secret keys for system prompt""" - content = self._last_raw_text or self.read_secrets_raw() + content = self.read_secrets_raw() if not content: return "" @@ -251,9 +272,7 @@ def replacer(match): return secrets[key] else: available_keys = ", ".join(secrets.keys()) - error_msg = ( - f"Secret placeholder '{alias_for_key(key)}' not found in secrets store.\n" - ) + error_msg = f"Secret placeholder '{alias_for_key(key)}' not found in secrets store.\n" error_msg += f"Available secrets: {available_keys}" raise RepairableException(error_msg) @@ -276,7 +295,9 @@ def change_placeholders(self, text: str, new_format: str) -> str: return result - def mask_values(self, text: str, min_length: int = 4, placeholder: str = "Β§Β§secret({key})") -> str: + def mask_values( + self, text: str, min_length: int = 4, placeholder: str = "Β§Β§secret({key})" + ) -> str: """Replace actual secret values with placeholders in text""" if not text: return text @@ -295,18 +316,21 @@ def mask_values(self, text: str, min_length: int = 4, placeholder: str = "Β§Β§se def get_masked_secrets(self) -> str: """Get content with values masked for frontend display (preserves comments and unrecognized lines)""" - if not (content:=self.read_secrets_raw()): + content = self.read_secrets_raw() + if not content: return "" # Parse content for known keys using python-dotenv secrets_map = self.parse_env_content(content) env_lines = self.parse_env_lines(content) + # Replace values with mask for keys present for ln in env_lines: if ln.type == "pair" and ln.key is not None: ln.key = ln.key.upper() if ln.key in secrets_map and secrets_map[ln.key] != "": ln.value = self.MASK_VALUE + return self._serialize_env_lines(env_lines) def parse_env_content(self, content: str) -> Dict[str, str]: @@ -325,6 +349,13 @@ def clear_cache(self): """Clear the secrets cache""" with self._lock: self._secrets_cache = None + self._raw_snapshots = {} + self._last_raw_text = None + + @classmethod + def _invalidate_all_caches(cls): + for instance in cls._instances.values(): + instance.clear_cache() # ---------------- Internal helpers for parsing/merging ---------------- @@ -342,9 +373,7 @@ def parse_env_lines(self, content: str) -> List[EnvLine]: # Fallback to composed key_part if original not available if "=" in line_text: left, right = line_text.split("=", 1) - key_part = left else: - key_part = binding.key right = "" # Try to extract inline comment by scanning right side to 
comment start, respecting quotes in_single = False @@ -376,7 +405,6 @@ def parse_env_lines(self, content: str) -> List[EnvLine]: type="pair", key=binding.key, value=binding.value or "", - key_part=key_part, inline_comment=inline_comment, ) ) @@ -404,11 +432,15 @@ def _serialize_env_lines( out: List[str] = [] for ln in lines: if ln.type == "pair" and ln.key is not None: - left_raw = ln.key_part if ln.key_part is not None else ln.key + left_raw = ln.key left = left_raw.upper() val = ln.value if ln.value is not None else "" comment = ln.inline_comment or "" - formatted_key = key_formatter(left) if key_formatter else f"{key_delimiter}{left}{key_delimiter}" + formatted_key = ( + key_formatter(left) + if key_formatter + else f"{key_delimiter}{left}{key_delimiter}" + ) val_part = f'="{val}"' if with_values else "" comment_part = f" {comment}" if with_comments and comment else "" out.append(f"{formatted_key}{val_part}{comment_part}") @@ -455,11 +487,10 @@ def _merge_env(self, existing_text: str, submitted_text: str) -> List[EnvLine]: existing_val = existing_pairs[key].value or "" merged.append( EnvLine( - raw=f"{(sub.key_part or key)}={existing_val}", + raw=f"{key}={existing_val}", type="pair", key=key, value=existing_val, - key_part=sub.key_part or key, inline_comment=sub.inline_comment, ) ) @@ -471,3 +502,40 @@ def _merge_env(self, existing_text: str, submitted_text: str) -> List[EnvLine]: merged.append(sub) return merged + + +def get_secrets_manager(context: "AgentContext|None" = None) -> SecretsManager: + from python.helpers import projects + + # default secrets file + secret_files = [DEFAULT_SECRETS_FILE] + + # use AgentContext from contextvars if no context provided + if not context: + from agent import AgentContext + context = AgentContext.current() + + # merged with project secrets if active + if context: + project = projects.get_context_project_name(context) + if project: + secret_files.append(files.get_abs_path(projects.get_project_meta_folder(project), "secrets.env")) + + return SecretsManager.get_instance(*secret_files) + +def get_project_secrets_manager(project_name: str, merge_with_global: bool = False) -> SecretsManager: + from python.helpers import projects + + # default secrets file + secret_files = [] + + if merge_with_global: + secret_files.append(DEFAULT_SECRETS_FILE) + + # merged with project secrets if active + secret_files.append(files.get_abs_path(projects.get_project_meta_folder(project_name), "secrets.env")) + + return SecretsManager.get_instance(*secret_files) + +def get_default_secrets_manager() -> SecretsManager: + return SecretsManager.get_instance() \ No newline at end of file diff --git a/python/helpers/settings.py b/python/helpers/settings.py index 3c6d826b89..c5e8cd1883 100644 --- a/python/helpers/settings.py +++ b/python/helpers/settings.py @@ -4,17 +4,55 @@ import os import re import subprocess -from typing import Any, Literal, TypedDict, cast +from typing import Any, Literal, TypedDict, cast, TypeVar import models from python.helpers import runtime, whisper, defer, git from . 
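# --- Illustration only ------------------------------------------------------
# A minimal sketch of the secrets-manager factories added at the end of
# python/helpers/secrets.py above; "demo" is a hypothetical project name.
from python.helpers.secrets import (
    get_default_secrets_manager,
    get_project_secrets_manager,
)

global_mgr = get_default_secrets_manager()                       # tmp/secrets.env only
project_mgr = get_project_secrets_manager("demo", merge_with_global=True)

masked = project_mgr.get_masked_secrets()   # values shown as "***" for the UI
keys = project_mgr.get_keys()               # key names from both secrets files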
import files, dotenv from python.helpers.print_style import PrintStyle -from python.helpers.providers import get_providers -from python.helpers.secrets import SecretsManager +from python.helpers.providers import get_providers, FieldOption as ProvidersFO +from python.helpers.secrets import get_default_secrets_manager from python.helpers import dirty_json +T = TypeVar("T") + + +def get_default_value(name: str, value: T) -> T: + """ + Load setting value from .env with A0_SET_ prefix, falling back to default. + + Args: + name: Setting name (will be prefixed with A0_SET_) + value: Default value to use if env var not set + + Returns: + Environment variable value (type-normalized) or default value + """ + env_value = dotenv.get_dotenv_value( + f"A0_SET_{name}", dotenv.get_dotenv_value(f"A0_SET_{name.upper()}", None) + ) + + if env_value is None: + return value + + # Normalize type to match value param type + try: + if isinstance(value, bool): + return env_value.strip().lower() in ("true", "1", "yes", "on") # type: ignore + elif isinstance(value, dict): + return json.loads(env_value.strip()) # type: ignore + elif isinstance(value, str): + return str(env_value).strip() # type: ignore + else: + return type(value)(env_value.strip()) # type: ignore + except (ValueError, TypeError, json.JSONDecodeError) as e: + PrintStyle(background_color="yellow", font_color="black").print( + f"Warning: Invalid value for A0_SET_{name}='{env_value}': {e}. Using default: {value}" + ) + return value + + class Settings(TypedDict): version: str @@ -87,7 +125,7 @@ class Settings(TypedDict): rfc_port_http: int rfc_port_ssh: int - shell_interface: Literal['local','ssh'] + shell_interface: Literal["local", "ssh"] stt_model_size: str stt_language: str @@ -111,6 +149,9 @@ class Settings(TypedDict): # LiteLLM global kwargs applied to all model calls litellm_global_kwargs: dict[str, Any] + update_check_enabled: bool + + class PartialSettings(Settings, total=False): pass @@ -152,8 +193,23 @@ class SettingsSection(TypedDict, total=False): tab: str # Indicates which tab this section belongs to +class ModelProvider(ProvidersFO): + pass + + +class SettingsOutputAdditional(TypedDict): + chat_providers: list[ModelProvider] + embedding_providers: list[ModelProvider] + shell_interfaces: list[FieldOption] + agent_subdirs: list[FieldOption] + knowledge_subdirs: list[FieldOption] + stt_models: list[FieldOption] + is_dockerized: bool + + class SettingsOutput(TypedDict): - sections: list[SettingsSection] + settings: Settings + additional: SettingsOutputAdditional PASSWORD_PLACEHOLDER = "****PSWD****" @@ -162,825 +218,47 @@ class SettingsOutput(TypedDict): SETTINGS_FILE = files.get_abs_path("tmp/settings.json") _settings: Settings | None = None +OptionT = TypeVar("OptionT", bound=FieldOption) -def convert_out(settings: Settings) -> SettingsOutput: - default_settings = get_default_settings() - - # main model section - chat_model_fields: list[SettingsField] = [] - chat_model_fields.append( - { - "id": "chat_model_provider", - "title": "Chat model provider", - "description": "Select provider for main chat model used by Agent Zero", - "type": "select", - "value": settings["chat_model_provider"], - "options": cast(list[FieldOption], get_providers("chat")), - } - ) - chat_model_fields.append( - { - "id": "chat_model_name", - "title": "Chat model name", - "description": "Exact name of model from selected provider", - "type": "text", - "value": settings["chat_model_name"], - } - ) - - chat_model_fields.append( - { - "id": "chat_model_api_base", - 
"title": "Chat model API base URL", - "description": "API base URL for main chat model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.", - "type": "text", - "value": settings["chat_model_api_base"], - } - ) - - chat_model_fields.append( - { - "id": "chat_model_ctx_length", - "title": "Chat model context length", - "description": "Maximum number of tokens in the context window for LLM. System prompt, chat history, RAG and response all count towards this limit.", - "type": "number", - "value": settings["chat_model_ctx_length"], - } - ) - - chat_model_fields.append( - { - "id": "chat_model_ctx_history", - "title": "Context window space for chat history", - "description": "Portion of context window dedicated to chat history visible to the agent. Chat history will automatically be optimized to fit. Smaller size will result in shorter and more summarized history. The remaining space will be used for system prompt, RAG and response.", - "type": "range", - "min": 0.01, - "max": 1, - "step": 0.01, - "value": settings["chat_model_ctx_history"], - } - ) - - chat_model_fields.append( - { - "id": "chat_model_vision", - "title": "Supports Vision", - "description": "Models capable of Vision can for example natively see the content of image attachments.", - "type": "switch", - "value": settings["chat_model_vision"], - } - ) - - chat_model_fields.append( - { - "id": "chat_model_rl_requests", - "title": "Requests per minute limit", - "description": "Limits the number of requests per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.", - "type": "number", - "value": settings["chat_model_rl_requests"], - } - ) - - chat_model_fields.append( - { - "id": "chat_model_rl_input", - "title": "Input tokens per minute limit", - "description": "Limits the number of input tokens per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.", - "type": "number", - "value": settings["chat_model_rl_input"], - } - ) - - chat_model_fields.append( - { - "id": "chat_model_rl_output", - "title": "Output tokens per minute limit", - "description": "Limits the number of output tokens per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.", - "type": "number", - "value": settings["chat_model_rl_output"], - } - ) - chat_model_fields.append( - { - "id": "chat_model_kwargs", - "title": "Chat model additional parameters", - "description": "Any other parameters supported by LiteLLM. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string.", - "type": "textarea", - "value": _dict_to_env(settings["chat_model_kwargs"]), - } - ) +def _ensure_option_present( + options: list[OptionT] | None, current_value: str | None +) -> list[OptionT]: + """ + Ensure the currently selected value exists in a dropdown options list. + If missing, inserts it at the front as {value: current_value, label: current_value}. 
+ """ + opts = list(options or []) + if not current_value: + return opts + for o in opts: + if o.get("value") == current_value: + return opts + opts.insert(0, cast(OptionT, {"value": current_value, "label": current_value})) + return opts - chat_model_section: SettingsSection = { - "id": "chat_model", - "title": "Chat Model", - "description": "Selection and settings for main chat model used by Agent Zero", - "fields": chat_model_fields, - "tab": "agent", - } - - # main model section - util_model_fields: list[SettingsField] = [] - util_model_fields.append( - { - "id": "util_model_provider", - "title": "Utility model provider", - "description": "Select provider for utility model used by the framework", - "type": "select", - "value": settings["util_model_provider"], - "options": cast(list[FieldOption], get_providers("chat")), - } - ) - util_model_fields.append( - { - "id": "util_model_name", - "title": "Utility model name", - "description": "Exact name of model from selected provider", - "type": "text", - "value": settings["util_model_name"], - } - ) - - util_model_fields.append( - { - "id": "util_model_api_base", - "title": "Utility model API base URL", - "description": "API base URL for utility model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.", - "type": "text", - "value": settings["util_model_api_base"], - } - ) - util_model_fields.append( - { - "id": "util_model_rl_requests", - "title": "Requests per minute limit", - "description": "Limits the number of requests per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.", - "type": "number", - "value": settings["util_model_rl_requests"], - } - ) - - util_model_fields.append( - { - "id": "util_model_rl_input", - "title": "Input tokens per minute limit", - "description": "Limits the number of input tokens per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.", - "type": "number", - "value": settings["util_model_rl_input"], - } - ) - - util_model_fields.append( - { - "id": "util_model_rl_output", - "title": "Output tokens per minute limit", - "description": "Limits the number of output tokens per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.", - "type": "number", - "value": settings["util_model_rl_output"], - } - ) - - util_model_fields.append( - { - "id": "util_model_kwargs", - "title": "Utility model additional parameters", - "description": "Any other parameters supported by LiteLLM. Format is KEY=VALUE on individual lines, like .env file. 
Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string.", - "type": "textarea", - "value": _dict_to_env(settings["util_model_kwargs"]), - } - ) - - util_model_section: SettingsSection = { - "id": "util_model", - "title": "Utility model", - "description": "Smaller, cheaper, faster model for handling utility tasks like organizing memory, preparing prompts, summarizing.", - "fields": util_model_fields, - "tab": "agent", - } - - # embedding model section - embed_model_fields: list[SettingsField] = [] - embed_model_fields.append( - { - "id": "embed_model_provider", - "title": "Embedding model provider", - "description": "Select provider for embedding model used by the framework", - "type": "select", - "value": settings["embed_model_provider"], - "options": cast(list[FieldOption], get_providers("embedding")), - } - ) - embed_model_fields.append( - { - "id": "embed_model_name", - "title": "Embedding model name", - "description": "Exact name of model from selected provider", - "type": "text", - "value": settings["embed_model_name"], - } - ) - - embed_model_fields.append( - { - "id": "embed_model_api_base", - "title": "Embedding model API base URL", - "description": "API base URL for embedding model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.", - "type": "text", - "value": settings["embed_model_api_base"], - } - ) - - embed_model_fields.append( - { - "id": "embed_model_rl_requests", - "title": "Requests per minute limit", - "description": "Limits the number of requests per minute to the embedding model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.", - "type": "number", - "value": settings["embed_model_rl_requests"], - } - ) - - embed_model_fields.append( - { - "id": "embed_model_rl_input", - "title": "Input tokens per minute limit", - "description": "Limits the number of input tokens per minute to the embedding model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.", - "type": "number", - "value": settings["embed_model_rl_input"], - } - ) - - embed_model_fields.append( - { - "id": "embed_model_kwargs", - "title": "Embedding model additional parameters", - "description": "Any other parameters supported by LiteLLM. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string.", - "type": "textarea", - "value": _dict_to_env(settings["embed_model_kwargs"]), - } - ) - - embed_model_section: SettingsSection = { - "id": "embed_model", - "title": "Embedding Model", - "description": f"Settings for the embedding model used by Agent Zero.

⚠️ No need to change

The default HuggingFace model {default_settings['embed_model_name']} is preloaded and runs locally within the docker container and there's no need to change it unless you have a specific requirements for embedding.", - "fields": embed_model_fields, - "tab": "agent", - } - - # embedding model section - browser_model_fields: list[SettingsField] = [] - browser_model_fields.append( - { - "id": "browser_model_provider", - "title": "Web Browser model provider", - "description": "Select provider for web browser model used by browser-use framework", - "type": "select", - "value": settings["browser_model_provider"], - "options": cast(list[FieldOption], get_providers("chat")), - } - ) - browser_model_fields.append( - { - "id": "browser_model_name", - "title": "Web Browser model name", - "description": "Exact name of model from selected provider", - "type": "text", - "value": settings["browser_model_name"], - } - ) - - browser_model_fields.append( - { - "id": "browser_model_api_base", - "title": "Web Browser model API base URL", - "description": "API base URL for web browser model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.", - "type": "text", - "value": settings["browser_model_api_base"], - } - ) - - browser_model_fields.append( - { - "id": "browser_model_vision", - "title": "Use Vision", - "description": "Models capable of Vision can use it to analyze web pages from screenshots. Increases quality but also token usage.", - "type": "switch", - "value": settings["browser_model_vision"], - } - ) - - browser_model_fields.append( - { - "id": "browser_model_rl_requests", - "title": "Web Browser model rate limit requests", - "description": "Rate limit requests for web browser model.", - "type": "number", - "value": settings["browser_model_rl_requests"], - } - ) - - browser_model_fields.append( - { - "id": "browser_model_rl_input", - "title": "Web Browser model rate limit input", - "description": "Rate limit input for web browser model.", - "type": "number", - "value": settings["browser_model_rl_input"], - } - ) - - browser_model_fields.append( - { - "id": "browser_model_rl_output", - "title": "Web Browser model rate limit output", - "description": "Rate limit output for web browser model.", - "type": "number", - "value": settings["browser_model_rl_output"], - } - ) - - browser_model_fields.append( - { - "id": "browser_model_kwargs", - "title": "Web Browser model additional parameters", - "description": "Any other parameters supported by LiteLLM. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string.", - "type": "textarea", - "value": _dict_to_env(settings["browser_model_kwargs"]), - } - ) - - browser_model_fields.append( - { - "id": "browser_http_headers", - "title": "HTTP Headers", - "description": "HTTP headers to include with all browser requests. Format is KEY=VALUE on individual lines, like .env file. Value can also contain JSON objects - when unquoted, it is treated as object, number etc., when quoted, it is treated as string. Example: Authorization=Bearer token123", - "type": "textarea", - "value": _dict_to_env(settings.get("browser_http_headers", {})), - } - ) - - browser_model_section: SettingsSection = { - "id": "browser_model", - "title": "Web Browser Model", - "description": "Settings for the web browser model. 
Agent Zero uses browser-use agentic framework to handle web interactions.", - "fields": browser_model_fields, - "tab": "agent", - } - - # basic auth section - auth_fields: list[SettingsField] = [] - - auth_fields.append( - { - "id": "auth_login", - "title": "UI Login", - "description": "Set user name for web UI", - "type": "text", - "value": dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN) or "", - } - ) - - auth_fields.append( - { - "id": "auth_password", - "title": "UI Password", - "description": "Set user password for web UI", - "type": "password", - "value": ( - PASSWORD_PLACEHOLDER - if dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD) - else "" - ), - } - ) - - if runtime.is_dockerized(): - auth_fields.append( - { - "id": "root_password", - "title": "root Password", - "description": "Change linux root password in docker container. This password can be used for SSH access. Original password was randomly generated during setup.", - "type": "password", - "value": "", - } - ) - - auth_section: SettingsSection = { - "id": "auth", - "title": "Authentication", - "description": "Settings for authentication to use Agent Zero Web UI.", - "fields": auth_fields, - "tab": "external", - } - - # api keys model section - api_keys_fields: list[SettingsField] = [] - - # Collect unique providers from both chat and embedding sections - providers_seen: set[str] = set() - for p_type in ("chat", "embedding"): - for provider in get_providers(p_type): - pid_lower = provider["value"].lower() - if pid_lower in providers_seen: - continue - providers_seen.add(pid_lower) - api_keys_fields.append( - _get_api_key_field(settings, pid_lower, provider["label"]) - ) - - api_keys_section: SettingsSection = { - "id": "api_keys", - "title": "API Keys", - "description": "API keys for model providers and services used by Agent Zero. You can set multiple API keys separated by a comma (,). They will be used in round-robin fashion.
For more information abou Agent Zero Venice provider, see Agent Zero Venice.", - "fields": api_keys_fields, - "tab": "external", - } - - # LiteLLM global config section - litellm_fields: list[SettingsField] = [] - - litellm_fields.append( - { - "id": "litellm_global_kwargs", - "title": "LiteLLM global parameters", - "description": "Global LiteLLM params (e.g. timeout, stream_timeout) in .env format: one KEY=VALUE per line. Example: stream_timeout=30. Applied to all LiteLLM calls unless overridden. See LiteLLM and timeouts.", - "type": "textarea", - "value": _dict_to_env(settings["litellm_global_kwargs"]), - "style": "height: 12em", - } - ) - - litellm_section: SettingsSection = { - "id": "litellm", - "title": "LiteLLM Global Settings", - "description": "Configure global parameters passed to LiteLLM for all providers.", - "fields": litellm_fields, - "tab": "external", - } - - # Agent config section - agent_fields: list[SettingsField] = [] - - agent_fields.append( - { - "id": "agent_profile", - "title": "Default agent profile", - "description": "Subdirectory of /agents folder to be used by default agent no. 0. Subordinate agents can be spawned with other profiles, that is on their superior agent to decide. This setting affects the behaviour of the top level agent you communicate with.", - "type": "select", - "value": settings["agent_profile"], - "options": [ +def convert_out(settings: Settings) -> SettingsOutput: + out = SettingsOutput( + settings=settings.copy(), + additional=SettingsOutputAdditional( + chat_providers=get_providers("chat"), + embedding_providers=get_providers("embedding"), + shell_interfaces=[ + {"value": "local", "label": "Local Python TTY"}, + {"value": "ssh", "label": "SSH"}, + ], + is_dockerized=runtime.is_dockerized(), + agent_subdirs=[ {"value": subdir, "label": subdir} for subdir in files.get_subdirectories("agents") if subdir != "_example" ], - } - ) - - agent_fields.append( - { - "id": "agent_knowledge_subdir", - "title": "Knowledge subdirectory", - "description": "Subdirectory of /knowledge folder to use for agent knowledge import. 'default' subfolder is always imported and contains framework knowledge.", - "type": "select", - "value": settings["agent_knowledge_subdir"], - "options": [ + knowledge_subdirs=[ {"value": subdir, "label": subdir} for subdir in files.get_subdirectories("knowledge", exclude="default") ], - } - ) - - agent_section: SettingsSection = { - "id": "agent", - "title": "Agent Config", - "description": "Agent parameters.", - "fields": agent_fields, - "tab": "agent", - } - - memory_fields: list[SettingsField] = [] - - memory_fields.append( - { - "id": "agent_memory_subdir", - "title": "Memory Subdirectory", - "description": "Subdirectory of /memory folder to use for agent memory storage. 
Used to separate memory storage between different instances.", - "type": "text", - "value": settings["agent_memory_subdir"], - # "options": [ - # {"value": subdir, "label": subdir} - # for subdir in files.get_subdirectories("memory", exclude="embeddings") - # ], - } - ) - - memory_fields.append( - { - "id": "memory_dashboard", - "title": "Memory Dashboard", - "description": "View and explore all stored memories in a table format with filtering and search capabilities.", - "type": "button", - "value": "Open Dashboard", - } - ) - - memory_fields.append( - { - "id": "memory_recall_enabled", - "title": "Memory auto-recall enabled", - "description": "Agent Zero will automatically recall memories based on convesation context.", - "type": "switch", - "value": settings["memory_recall_enabled"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_delayed", - "title": "Memory auto-recall delayed", - "description": "The agent will not wait for auto memory recall. Memories will be delivered one message later. This speeds up agent's response time but may result in less relevant first step.", - "type": "switch", - "value": settings["memory_recall_delayed"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_query_prep", - "title": "Auto-recall AI query preparation", - "description": "Enables vector DB query preparation from conversation context by utility LLM for auto-recall. Improves search quality, adds 1 utility LLM call per auto-recall.", - "type": "switch", - "value": settings["memory_recall_query_prep"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_post_filter", - "title": "Auto-recall AI post-filtering", - "description": "Enables memory relevance filtering by utility LLM for auto-recall. Improves search quality, adds 1 utility LLM call per auto-recall.", - "type": "switch", - "value": settings["memory_recall_post_filter"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_interval", - "title": "Memory auto-recall interval", - "description": "Memories are recalled after every user or superior agent message. 
During agent's monologue, memories are recalled every X turns based on this parameter.", - "type": "range", - "min": 1, - "max": 10, - "step": 1, - "value": settings["memory_recall_interval"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_history_len", - "title": "Memory auto-recall history length", - "description": "The length of conversation history passed to memory recall LLM for context (in characters).", - "type": "number", - "value": settings["memory_recall_history_len"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_similarity_threshold", - "title": "Memory auto-recall similarity threshold", - "description": "The threshold for similarity search in memory recall (0 = no similarity, 1 = exact match).", - "type": "range", - "min": 0, - "max": 1, - "step": 0.01, - "value": settings["memory_recall_similarity_threshold"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_memories_max_search", - "title": "Memory auto-recall max memories to search", - "description": "The maximum number of memories returned by vector DB for further processing.", - "type": "number", - "value": settings["memory_recall_memories_max_search"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_memories_max_result", - "title": "Memory auto-recall max memories to use", - "description": "The maximum number of memories to inject into A0's context window.", - "type": "number", - "value": settings["memory_recall_memories_max_result"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_solutions_max_search", - "title": "Memory auto-recall max solutions to search", - "description": "The maximum number of solutions returned by vector DB for further processing.", - "type": "number", - "value": settings["memory_recall_solutions_max_search"], - } - ) - - memory_fields.append( - { - "id": "memory_recall_solutions_max_result", - "title": "Memory auto-recall max solutions to use", - "description": "The maximum number of solutions to inject into A0's context window.", - "type": "number", - "value": settings["memory_recall_solutions_max_result"], - } - ) - - memory_fields.append( - { - "id": "memory_memorize_enabled", - "title": "Auto-memorize enabled", - "description": "A0 will automatically memorize facts and solutions from conversation history.", - "type": "switch", - "value": settings["memory_memorize_enabled"], - } - ) - - memory_fields.append( - { - "id": "memory_memorize_consolidation", - "title": "Auto-memorize AI consolidation", - "description": "A0 will automatically consolidate similar memories using utility LLM. Improves memory quality over time, adds 2 utility LLM calls per memory.", - "type": "switch", - "value": settings["memory_memorize_consolidation"], - } - ) - - memory_fields.append( - { - "id": "memory_memorize_replace_threshold", - "title": "Auto-memorize replacement threshold", - "description": "Only applies when AI consolidation is disabled. Replaces previous similar memories with new ones based on this threshold. 0 = replace even if not similar at all, 1 = replace only if exact match.", - "type": "range", - "min": 0, - "max": 1, - "step": 0.01, - "value": settings["memory_memorize_replace_threshold"], - } - ) - - memory_section: SettingsSection = { - "id": "memory", - "title": "Memory", - "description": "Configuration of A0's memory system. 
A0 memorizes and recalls memories automatically to help it's context awareness.", - "fields": memory_fields, - "tab": "agent", - } - - dev_fields: list[SettingsField] = [] - - dev_fields.append( - { - "id": "shell_interface", - "title": "Shell Interface", - "description": "Terminal interface used for Code Execution Tool. Local Python TTY works locally in both dockerized and development environments. SSH always connects to dockerized environment (automatically at localhost or RFC host address).", - "type": "select", - "value": settings["shell_interface"], - "options": [{"value": "local", "label": "Local Python TTY"}, {"value": "ssh", "label": "SSH"}], - } - ) - - if runtime.is_development(): - # dev_fields.append( - # { - # "id": "rfc_auto_docker", - # "title": "RFC Auto Docker Management", - # "description": "Automatically create dockerized instance of A0 for RFCs using this instance's code base and, settings and .env.", - # "type": "text", - # "value": settings["rfc_auto_docker"], - # } - # ) - - dev_fields.append( - { - "id": "rfc_url", - "title": "RFC Destination URL", - "description": "URL of dockerized A0 instance for remote function calls. Do not specify port here.", - "type": "text", - "value": settings["rfc_url"], - } - ) - - dev_fields.append( - { - "id": "rfc_password", - "title": "RFC Password", - "description": "Password for remote function calls. Passwords must match on both instances. RFCs can not be used with empty password.", - "type": "password", - "value": ( - PASSWORD_PLACEHOLDER - if dotenv.get_dotenv_value(dotenv.KEY_RFC_PASSWORD) - else "" - ), - } - ) - - if runtime.is_development(): - dev_fields.append( - { - "id": "rfc_port_http", - "title": "RFC HTTP port", - "description": "HTTP port for dockerized instance of A0.", - "type": "text", - "value": settings["rfc_port_http"], - } - ) - - dev_fields.append( - { - "id": "rfc_port_ssh", - "title": "RFC SSH port", - "description": "SSH port for dockerized instance of A0.", - "type": "text", - "value": settings["rfc_port_ssh"], - } - ) - - dev_section: SettingsSection = { - "id": "dev", - "title": "Development", - "description": "Parameters for A0 framework development. RFCs (remote function calls) are used to call functions on another A0 instance. You can develop and debug A0 natively on your local system while redirecting some functions to A0 instance in docker. This is crucial for development as A0 needs to run in standardized environment to support all features.", - "fields": dev_fields, - "tab": "developer", - } - - # code_exec_fields: list[SettingsField] = [] - - # code_exec_fields.append( - # { - # "id": "code_exec_ssh_enabled", - # "title": "Use SSH for code execution", - # "description": "Code execution will use SSH to connect to the terminal. When disabled, a local python terminal interface is used instead. SSH should only be used in development environment or when encountering issues with the local python terminal interface.", - # "type": "switch", - # "value": settings["code_exec_ssh_enabled"], - # } - # ) - - # code_exec_fields.append( - # { - # "id": "code_exec_ssh_addr", - # "title": "Code execution SSH address", - # "description": "Address of the SSH server for code execution. Only applies when SSH is enabled.", - # "type": "text", - # "value": settings["code_exec_ssh_addr"], - # } - # ) - - # code_exec_fields.append( - # { - # "id": "code_exec_ssh_port", - # "title": "Code execution SSH port", - # "description": "Port of the SSH server for code execution. 
Only applies when SSH is enabled.", - # "type": "text", - # "value": settings["code_exec_ssh_port"], - # } - # ) - - # code_exec_section: SettingsSection = { - # "id": "code_exec", - # "title": "Code execution", - # "description": "Configuration of code execution by the agent.", - # "fields": code_exec_fields, - # "tab": "developer", - # } - - # Speech to text section - stt_fields: list[SettingsField] = [] - - stt_fields.append( - { - "id": "stt_microphone_section", - "title": "Microphone device", - "description": "Select the microphone device to use for speech-to-text.", - "value": "", - "type": "html", - } - ) - - stt_fields.append( - { - "id": "stt_model_size", - "title": "Speech-to-text model size", - "description": "Select the speech-to-text model size", - "type": "select", - "value": settings["stt_model_size"], - "options": [ + stt_models=[ {"value": "tiny", "label": "Tiny (39M, English)"}, {"value": "base", "label": "Base (74M, English)"}, {"value": "small", "label": "Small (244M, English)"}, @@ -988,293 +266,86 @@ def convert_out(settings: Settings) -> SettingsOutput: {"value": "large", "label": "Large (1.5B, Multilingual)"}, {"value": "turbo", "label": "Turbo (Multilingual)"}, ], - } + ), ) - stt_fields.append( - { - "id": "stt_language", - "title": "Speech-to-text language code", - "description": "Language code (e.g. en, fr, it)", - "type": "text", - "value": settings["stt_language"], - } - ) + # ensure dropdown options include currently selected values + additional = out["additional"] + current = out["settings"] - stt_fields.append( - { - "id": "stt_silence_threshold", - "title": "Microphone silence threshold", - "description": "Silence detection threshold. Lower values are more sensitive to noise.", - "type": "range", - "min": 0, - "max": 1, - "step": 0.01, - "value": settings["stt_silence_threshold"], - } + additional["chat_providers"] = _ensure_option_present( + additional.get("chat_providers"), current.get("chat_model_provider") ) - - stt_fields.append( - { - "id": "stt_silence_duration", - "title": "Microphone silence duration (ms)", - "description": "Duration of silence before the system considers speaking to have ended.", - "type": "text", - "value": settings["stt_silence_duration"], - } + additional["chat_providers"] = _ensure_option_present( + additional.get("chat_providers"), current.get("util_model_provider") ) - - stt_fields.append( - { - "id": "stt_waiting_timeout", - "title": "Microphone waiting timeout (ms)", - "description": "Duration of silence before the system closes the microphone.", - "type": "text", - "value": settings["stt_waiting_timeout"], - } + additional["chat_providers"] = _ensure_option_present( + additional.get("chat_providers"), current.get("browser_model_provider") ) - - # TTS fields - tts_fields: list[SettingsField] = [] - - tts_fields.append( - { - "id": "tts_kokoro", - "title": "Enable Kokoro TTS", - "description": "Enable higher quality server-side AI (Kokoro) instead of browser-based text-to-speech.", - "type": "switch", - "value": settings["tts_kokoro"], - } + additional["embedding_providers"] = _ensure_option_present( + additional.get("embedding_providers"), current.get("embed_model_provider") ) - - speech_section: SettingsSection = { - "id": "speech", - "title": "Speech", - "description": "Voice transcription and speech synthesis settings.", - "fields": stt_fields + tts_fields, - "tab": "agent", - } - - # MCP section - mcp_client_fields: list[SettingsField] = [] - - mcp_client_fields.append( - { - "id": "mcp_servers_config", - "title": 
"MCP Servers Configuration", - "description": "External MCP servers can be configured here.", - "type": "button", - "value": "Open", - } + additional["shell_interfaces"] = _ensure_option_present( + additional.get("shell_interfaces"), current.get("shell_interface") ) - - mcp_client_fields.append( - { - "id": "mcp_servers", - "title": "MCP Servers", - "description": "(JSON list of) >> RemoteServer <<: [name, url, headers, timeout (opt), sse_read_timeout (opt), disabled (opt)] / >> Local Server <<: [name, command, args, env, encoding (opt), encoding_error_handler (opt), disabled (opt)]", - "type": "textarea", - "value": settings["mcp_servers"], - "hidden": True, - } + additional["agent_subdirs"] = _ensure_option_present( + additional.get("agent_subdirs"), current.get("agent_profile") ) - - mcp_client_fields.append( - { - "id": "mcp_client_init_timeout", - "title": "MCP Client Init Timeout", - "description": "Timeout for MCP client initialization (in seconds). Higher values might be required for complex MCPs, but might also slowdown system startup.", - "type": "number", - "value": settings["mcp_client_init_timeout"], - } + additional["knowledge_subdirs"] = _ensure_option_present( + additional.get("knowledge_subdirs"), current.get("agent_knowledge_subdir") ) - - mcp_client_fields.append( - { - "id": "mcp_client_tool_timeout", - "title": "MCP Client Tool Timeout", - "description": "Timeout for MCP client tool execution. Higher values might be required for complex tools, but might also result in long responses with failing tools.", - "type": "number", - "value": settings["mcp_client_tool_timeout"], - } - ) - - mcp_client_section: SettingsSection = { - "id": "mcp_client", - "title": "External MCP Servers", - "description": "Agent Zero can use external MCP servers, local or remote as tools.", - "fields": mcp_client_fields, - "tab": "mcp", - } - - # Secrets section - secrets_fields: list[SettingsField] = [] - - secrets_manager = SecretsManager.get_instance() - try: - secrets = secrets_manager.get_masked_secrets() - except Exception: - secrets = "" - - secrets_fields.append({ - "id": "variables", - "title": "Variables Store", - "description": "Store non-sensitive variables in .env format e.g. EMAIL_IMAP_SERVER=\"imap.gmail.com\", one item per line. You can use comments starting with # to add descriptions for the agent. See example.
These variables are visible to LLMs and in chat history; they are not being masked.",
-        "type": "textarea",
-        "value": settings["variables"].strip(),
-        "style": "height: 20em",
-    })
-
-    secrets_fields.append({
-        "id": "secrets",
-        "title": "Secrets Store",
-        "description": "Store secrets and credentials in .env format e.g. EMAIL_PASSWORD=\"s3cret-p4$$w0rd\", one item per line. You can use comments starting with # to add descriptions for the agent. See example.
These variables are not visile to LLMs and in chat history, they are being masked. ⚠️ only values with length >= 4 are being masked to prevent false positives. ", - "type": "textarea", - "value": secrets, - "style": "height: 20em", - }) - - secrets_section: SettingsSection = { - "id": "secrets", - "title": "Secrets Management", - "description": "Manage secrets and credentials that agents can use without exposing values to LLMs, chat history or logs. Placeholders are automatically replaced with values just before tool calls. If bare passwords occur in tool results, they are masked back to placeholders.", - "fields": secrets_fields, - "tab": "external", - } - - mcp_server_fields: list[SettingsField] = [] - - mcp_server_fields.append( - { - "id": "mcp_server_enabled", - "title": "Enable A0 MCP Server", - "description": "Expose Agent Zero as an SSE/HTTP MCP server. This will make this A0 instance available to MCP clients.", - "type": "switch", - "value": settings["mcp_server_enabled"], - } - ) - - mcp_server_fields.append( - { - "id": "mcp_server_token", - "title": "MCP Server Token", - "description": "Token for MCP server authentication.", - "type": "text", - "hidden": True, - "value": settings["mcp_server_token"], - } - ) - - mcp_server_section: SettingsSection = { - "id": "mcp_server", - "title": "A0 MCP Server", - "description": "Agent Zero can be exposed as an SSE MCP server. See connection example.", - "fields": mcp_server_fields, - "tab": "mcp", - } - - # -------- A2A Section -------- - a2a_fields: list[SettingsField] = [] - - a2a_fields.append( - { - "id": "a2a_server_enabled", - "title": "Enable A2A server", - "description": "Expose Agent Zero as A2A server. This allows other agents to connect to A0 via A2A protocol.", - "type": "switch", - "value": settings["a2a_server_enabled"], - } + additional["stt_models"] = _ensure_option_present( + additional.get("stt_models"), current.get("stt_model_size") ) - a2a_section: SettingsSection = { - "id": "a2a_server", - "title": "A0 A2A Server", - "description": "Agent Zero can be exposed as an A2A server. See connection example.", - "fields": a2a_fields, - "tab": "mcp", - } - - - # External API section - external_api_fields: list[SettingsField] = [] + # masked api keys + providers = get_providers("chat") + get_providers("embedding") + for provider in providers: + provider_name = provider["value"] + api_key = settings["api_keys"].get( + provider_name, models.get_api_key(provider_name) + ) + settings["api_keys"][provider_name] = ( + API_KEY_PLACEHOLDER if api_key and api_key != "None" else "" + ) - external_api_fields.append( - { - "id": "external_api_examples", - "title": "API Examples", - "description": "View examples for using Agent Zero's external API endpoints with API key authentication.", - "type": "button", - "value": "Show API Examples", - } + # load auth from dotenv + out["settings"]["auth_login"] = dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN) or "" + out["settings"]["auth_password"] = ( + PASSWORD_PLACEHOLDER + if dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD) + else "" ) - - external_api_section: SettingsSection = { - "id": "external_api", - "title": "External API", - "description": "Agent Zero provides external API endpoints for integration with other applications. 
" - "These endpoints use API key authentication and support text messages and file attachments.", - "fields": external_api_fields, - "tab": "external", - } - - # Backup & Restore section - backup_fields: list[SettingsField] = [] - - backup_fields.append( - { - "id": "backup_create", - "title": "Create Backup", - "description": "Create a backup archive of selected files and configurations " - "using customizable patterns.", - "type": "button", - "value": "Create Backup", - } + out["settings"]["rfc_password"] = ( + PASSWORD_PLACEHOLDER if dotenv.get_dotenv_value(dotenv.KEY_RFC_PASSWORD) else "" ) - - backup_fields.append( - { - "id": "backup_restore", - "title": "Restore from Backup", - "description": "Restore files and configurations from a backup archive " - "with pattern-based selection.", - "type": "button", - "value": "Restore Backup", - } + out["settings"]["root_password"] = ( + PASSWORD_PLACEHOLDER + if dotenv.get_dotenv_value(dotenv.KEY_ROOT_PASSWORD) + else "" ) - backup_section: SettingsSection = { - "id": "backup_restore", - "title": "Backup & Restore", - "description": "Backup and restore Agent Zero data and configurations " - "using glob pattern-based file selection.", - "fields": backup_fields, - "tab": "backup", - } - - # Add the section to the result - result: SettingsOutput = { - "sections": [ - agent_section, - chat_model_section, - util_model_section, - browser_model_section, - embed_model_section, - memory_section, - speech_section, - api_keys_section, - litellm_section, - secrets_section, - auth_section, - mcp_client_section, - mcp_server_section, - a2a_section, - external_api_section, - backup_section, - dev_section, - # code_exec_section, - ] - } - return result + # secrets + secrets_manager = get_default_secrets_manager() + try: + out["settings"]["secrets"] = secrets_manager.get_masked_secrets() + except Exception: + out["settings"]["secrets"] = "" + + # mask API keys before sending to frontend + if isinstance(out["settings"].get("api_keys"), dict): + for provider, value in list(out["settings"]["api_keys"].items()): + if value: + out["settings"]["api_keys"][provider] = API_KEY_PLACEHOLDER + + # normalize certain fields + for key, value in list(out["settings"].items()): + # convert kwargs dicts to .env format + if (key.endswith("_kwargs") or key == "browser_http_headers") and isinstance( + value, dict + ): + out["settings"][key] = _dict_to_env(value) + return out def _get_api_key_field(settings: Settings, provider: str, title: str) -> SettingsField: @@ -1288,27 +359,21 @@ def _get_api_key_field(settings: Settings, provider: str, title: str) -> Setting } -def convert_in(settings: dict) -> Settings: +def convert_in(settings: Settings) -> Settings: current = get_settings() - for section in settings["sections"]: - if "fields" in section: - for field in section["fields"]: - # Skip saving if value is a placeholder - should_skip = ( - field["value"] == PASSWORD_PLACEHOLDER or - field["value"] == API_KEY_PLACEHOLDER - ) - if not should_skip: - # Special handling for browser_http_headers - if field["id"] == "browser_http_headers" or field["id"].endswith("_kwargs"): - current[field["id"]] = _env_to_dict(field["value"]) - elif field["id"].startswith("api_key_"): - current["api_keys"][field["id"]] = field["value"] - else: - current[field["id"]] = field["value"] + for key, value in settings.items(): + # Special handling for browser_http_headers and *_kwargs (stored as .env text) + if (key == "browser_http_headers" or key.endswith("_kwargs")) and isinstance( + value, str + ): + 
current[key] = _env_to_dict(value) + continue + + current[key] = value return current + def get_settings() -> Settings: global _settings if not _settings: @@ -1326,12 +391,19 @@ def set_settings(settings: Settings, apply: bool = True): _write_settings_file(_settings) if apply: _apply_settings(previous) + return _settings def set_settings_delta(delta: dict, apply: bool = True): current = get_settings() new = {**current, **delta} - set_settings(new, apply) # type: ignore + return set_settings(new, apply) # type: ignore + + +def merge_settings(original: Settings, delta: dict) -> Settings: + merged = original.copy() + merged.update(delta) + return merged def normalize_settings(settings: Settings) -> Settings: @@ -1403,106 +475,129 @@ def _remove_sensitive_settings(settings: Settings): def _write_sensitive_settings(settings: Settings): for key, val in settings["api_keys"].items(): - dotenv.save_dotenv_value(key.upper(), val) + if val != API_KEY_PLACEHOLDER: + dotenv.save_dotenv_value(key.upper(), val) dotenv.save_dotenv_value(dotenv.KEY_AUTH_LOGIN, settings["auth_login"]) - if settings["auth_password"]: + if settings["auth_password"] != PASSWORD_PLACEHOLDER: dotenv.save_dotenv_value(dotenv.KEY_AUTH_PASSWORD, settings["auth_password"]) - if settings["rfc_password"]: + if settings["rfc_password"] != PASSWORD_PLACEHOLDER: dotenv.save_dotenv_value(dotenv.KEY_RFC_PASSWORD, settings["rfc_password"]) - - if settings["root_password"]: - dotenv.save_dotenv_value(dotenv.KEY_ROOT_PASSWORD, settings["root_password"]) - if settings["root_password"]: - set_root_password(settings["root_password"]) + if settings["root_password"] != PASSWORD_PLACEHOLDER: + if runtime.is_dockerized(): + dotenv.save_dotenv_value( + dotenv.KEY_ROOT_PASSWORD, settings["root_password"] + ) + set_root_password(settings["root_password"]) # Handle secrets separately - merge with existing preserving comments/order and support deletions - secrets_manager = SecretsManager.get_instance() + secrets_manager = get_default_secrets_manager() submitted_content = settings["secrets"] secrets_manager.save_secrets_with_merge(submitted_content) - secrets_manager.clear_cache() # Clear cache to reload secrets - def get_default_settings() -> Settings: return Settings( version=_get_version(), - chat_model_provider="openrouter", - chat_model_name="openai/gpt-4.1", - chat_model_api_base="", - chat_model_kwargs={"temperature": "0"}, - chat_model_ctx_length=100000, - chat_model_ctx_history=0.7, - chat_model_vision=True, - chat_model_rl_requests=0, - chat_model_rl_input=0, - chat_model_rl_output=0, - util_model_provider="openrouter", - util_model_name="openai/gpt-4.1-mini", - util_model_api_base="", - util_model_ctx_length=100000, - util_model_ctx_input=0.7, - util_model_kwargs={"temperature": "0"}, - util_model_rl_requests=0, - util_model_rl_input=0, - util_model_rl_output=0, - embed_model_provider="huggingface", - embed_model_name="sentence-transformers/all-MiniLM-L6-v2", - embed_model_api_base="", - embed_model_kwargs={}, - embed_model_rl_requests=0, - embed_model_rl_input=0, - browser_model_provider="openrouter", - browser_model_name="openai/gpt-4.1", - browser_model_api_base="", - browser_model_vision=True, - browser_model_rl_requests=0, - browser_model_rl_input=0, - browser_model_rl_output=0, - browser_model_kwargs={"temperature": "0"}, - browser_http_headers={}, - memory_recall_enabled=True, - memory_recall_delayed=False, - memory_recall_interval=3, - memory_recall_history_len=10000, - memory_recall_memories_max_search=12, - 
memory_recall_solutions_max_search=8, - memory_recall_memories_max_result=5, - memory_recall_solutions_max_result=3, - memory_recall_similarity_threshold=0.7, - memory_recall_query_prep=True, - memory_recall_post_filter=True, - memory_memorize_enabled=True, - memory_memorize_consolidation=True, - memory_memorize_replace_threshold=0.9, + chat_model_provider=get_default_value("chat_model_provider", "openrouter"), + chat_model_name=get_default_value("chat_model_name", "openai/gpt-4.1"), + chat_model_api_base=get_default_value("chat_model_api_base", ""), + chat_model_kwargs=get_default_value("chat_model_kwargs", {"temperature": "0"}), + chat_model_ctx_length=get_default_value("chat_model_ctx_length", 100000), + chat_model_ctx_history=get_default_value("chat_model_ctx_history", 0.7), + chat_model_vision=get_default_value("chat_model_vision", True), + chat_model_rl_requests=get_default_value("chat_model_rl_requests", 0), + chat_model_rl_input=get_default_value("chat_model_rl_input", 0), + chat_model_rl_output=get_default_value("chat_model_rl_output", 0), + util_model_provider=get_default_value("util_model_provider", "openrouter"), + util_model_name=get_default_value("util_model_name", "openai/gpt-4.1-mini"), + util_model_api_base=get_default_value("util_model_api_base", ""), + util_model_ctx_length=get_default_value("util_model_ctx_length", 100000), + util_model_ctx_input=get_default_value("util_model_ctx_input", 0.7), + util_model_kwargs=get_default_value("util_model_kwargs", {"temperature": "0"}), + util_model_rl_requests=get_default_value("util_model_rl_requests", 0), + util_model_rl_input=get_default_value("util_model_rl_input", 0), + util_model_rl_output=get_default_value("util_model_rl_output", 0), + embed_model_provider=get_default_value("embed_model_provider", "huggingface"), + embed_model_name=get_default_value( + "embed_model_name", "sentence-transformers/all-MiniLM-L6-v2" + ), + embed_model_api_base=get_default_value("embed_model_api_base", ""), + embed_model_kwargs=get_default_value("embed_model_kwargs", {}), + embed_model_rl_requests=get_default_value("embed_model_rl_requests", 0), + embed_model_rl_input=get_default_value("embed_model_rl_input", 0), + browser_model_provider=get_default_value( + "browser_model_provider", "openrouter" + ), + browser_model_name=get_default_value("browser_model_name", "openai/gpt-4.1"), + browser_model_api_base=get_default_value("browser_model_api_base", ""), + browser_model_vision=get_default_value("browser_model_vision", True), + browser_model_rl_requests=get_default_value("browser_model_rl_requests", 0), + browser_model_rl_input=get_default_value("browser_model_rl_input", 0), + browser_model_rl_output=get_default_value("browser_model_rl_output", 0), + browser_model_kwargs=get_default_value( + "browser_model_kwargs", {"temperature": "0"} + ), + browser_http_headers=get_default_value("browser_http_headers", {}), + memory_recall_enabled=get_default_value("memory_recall_enabled", True), + memory_recall_delayed=get_default_value("memory_recall_delayed", False), + memory_recall_interval=get_default_value("memory_recall_interval", 3), + memory_recall_history_len=get_default_value("memory_recall_history_len", 10000), + memory_recall_memories_max_search=get_default_value( + "memory_recall_memories_max_search", 12 + ), + memory_recall_solutions_max_search=get_default_value( + "memory_recall_solutions_max_search", 8 + ), + memory_recall_memories_max_result=get_default_value( + "memory_recall_memories_max_result", 5 + ), + 
memory_recall_solutions_max_result=get_default_value( + "memory_recall_solutions_max_result", 3 + ), + memory_recall_similarity_threshold=get_default_value( + "memory_recall_similarity_threshold", 0.7 + ), + memory_recall_query_prep=get_default_value("memory_recall_query_prep", True), + memory_recall_post_filter=get_default_value("memory_recall_post_filter", True), + memory_memorize_enabled=get_default_value("memory_memorize_enabled", True), + memory_memorize_consolidation=get_default_value( + "memory_memorize_consolidation", True + ), + memory_memorize_replace_threshold=get_default_value( + "memory_memorize_replace_threshold", 0.9 + ), api_keys={}, auth_login="", auth_password="", root_password="", - agent_profile="agent0", - agent_memory_subdir="default", - agent_knowledge_subdir="custom", - rfc_auto_docker=True, - rfc_url="localhost", + agent_profile=get_default_value("agent_profile", "agent0"), + agent_memory_subdir=get_default_value("agent_memory_subdir", "default"), + agent_knowledge_subdir=get_default_value("agent_knowledge_subdir", "custom"), + rfc_auto_docker=get_default_value("rfc_auto_docker", True), + rfc_url=get_default_value("rfc_url", "localhost"), rfc_password="", - rfc_port_http=55080, - rfc_port_ssh=55022, - shell_interface="local" if runtime.is_dockerized() else "ssh", - stt_model_size="base", - stt_language="en", - stt_silence_threshold=0.3, - stt_silence_duration=1000, - stt_waiting_timeout=2000, - tts_kokoro=True, - mcp_servers='{\n "mcpServers": {}\n}', - mcp_client_init_timeout=10, - mcp_client_tool_timeout=120, - mcp_server_enabled=False, + rfc_port_http=get_default_value("rfc_port_http", 55080), + rfc_port_ssh=get_default_value("rfc_port_ssh", 55022), + shell_interface=get_default_value( + "shell_interface", "local" if runtime.is_dockerized() else "ssh" + ), + stt_model_size=get_default_value("stt_model_size", "base"), + stt_language=get_default_value("stt_language", "en"), + stt_silence_threshold=get_default_value("stt_silence_threshold", 0.3), + stt_silence_duration=get_default_value("stt_silence_duration", 1000), + stt_waiting_timeout=get_default_value("stt_waiting_timeout", 2000), + tts_kokoro=get_default_value("tts_kokoro", True), + mcp_servers=get_default_value("mcp_servers", '{\n "mcpServers": {}\n}'), + mcp_client_init_timeout=get_default_value("mcp_client_init_timeout", 10), + mcp_client_tool_timeout=get_default_value("mcp_client_tool_timeout", 120), + mcp_server_enabled=get_default_value("mcp_server_enabled", False), mcp_server_token=create_auth_token(), - a2a_server_enabled=False, + a2a_server_enabled=get_default_value("a2a_server_enabled", False), variables="", secrets="", - litellm_global_kwargs={}, + litellm_global_kwargs=get_default_value("litellm_global_kwargs", {}), + update_check_enabled=get_default_value("update_check_enabled", True), ) @@ -1586,9 +681,7 @@ async def update_mcp_settings(mcp_servers: str): ) # TODO overkill, replace with background task # update token in mcp server - current_token = ( - create_auth_token() - ) # TODO - ugly, token in settings is generated from dotenv and does not always correspond + current_token = create_auth_token() # TODO - ugly, token in settings is generated from dotenv and does not always correspond if not previous or current_token != previous["mcp_server_token"]: async def update_mcp_token(token: str): @@ -1617,16 +710,16 @@ def _env_to_dict(data: str): result = {} for line in data.splitlines(): line = line.strip() - if not line or line.startswith('#'): + if not line or line.startswith("#"): continue - - 
if '=' not in line: + + if "=" not in line: continue - - key, value = line.split('=', 1) + + key, value = line.split("=", 1) key = key.strip() value = value.strip() - + # If quoted, treat as string if value.startswith('"') and value.endswith('"'): result[key] = value[1:-1].replace('\\"', '"') # Unescape quotes @@ -1638,7 +731,7 @@ def _env_to_dict(data: str): result[key] = json.loads(value) except (json.JSONDecodeError, ValueError): result[key] = value - + return result @@ -1651,11 +744,11 @@ def _dict_to_env(data_dict): lines.append(f'{key}="{escaped_value}"') elif isinstance(value, (dict, list, bool)) or value is None: # Serialize as unquoted JSON - lines.append(f'{key}={json.dumps(value, separators=(",", ":"))}') + lines.append(f"{key}={json.dumps(value, separators=(',', ':'))}") else: # Numbers and other types as unquoted strings - lines.append(f'{key}={value}') - + lines.append(f"{key}={value}") + return "\n".join(lines) @@ -1707,8 +800,4 @@ def create_auth_token() -> str: def _get_version(): - try: - git_info = git.get_git_info() - return str(git_info.get("short_tag", "")).strip() or "unknown" - except Exception: - return "unknown" + return git.get_version() diff --git a/python/helpers/shell_local.py b/python/helpers/shell_local.py index cc0815f104..dcc14398d8 100644 --- a/python/helpers/shell_local.py +++ b/python/helpers/shell_local.py @@ -1,18 +1,20 @@ +import platform import select import subprocess import time import sys from typing import Optional, Tuple -from python.helpers import tty_session +from python.helpers import tty_session, runtime from python.helpers.shell_ssh import clean_string class LocalInteractiveSession: - def __init__(self): + def __init__(self, cwd: str|None = None): self.session: tty_session.TTYSession|None = None self.full_output = '' + self.cwd = cwd async def connect(self): - self.session = tty_session.TTYSession("/bin/bash") + self.session = tty_session.TTYSession(runtime.get_terminal_executable(), cwd=self.cwd) await self.session.start() await self.session.read_full_until_idle(idle_timeout=1, total_timeout=1) diff --git a/python/helpers/shell_ssh.py b/python/helpers/shell_ssh.py index 3d368eb402..bafcdbea08 100644 --- a/python/helpers/shell_ssh.py +++ b/python/helpers/shell_ssh.py @@ -14,7 +14,7 @@ class SSHInteractiveSession: # ps1_label = "SSHInteractiveSession CLI>" def __init__( - self, logger: Log, hostname: str, port: int, username: str, password: str + self, logger: Log, hostname: str, port: int, username: str, password: str, cwd: str|None = None ): self.logger = logger self.hostname = hostname @@ -27,6 +27,7 @@ def __init__( self.full_output = b"" self.last_command = b"" self.trimmed_command_length = 0 # Initialize trimmed_command_length + self.cwd = cwd async def connect(self, keepalive_interval: int = 5): """ @@ -60,7 +61,12 @@ async def connect(self, keepalive_interval: int = 5): # invoke interactive shell self.shell = self.client.invoke_shell(width=100, height=50) - self.shell.send("stty -echo\n".encode()) # disable local echo + + # disable systemd/OSC prompt metadata and disable local echo + initial_command = "unset PROMPT_COMMAND PS0; stty -echo" + if self.cwd: + initial_command = f"cd {self.cwd}; {initial_command}" + self.shell.send(f"{initial_command}\n".encode()) # wait for initial prompt/output to settle while True: @@ -99,7 +105,7 @@ async def send_command(self, command: str): self.last_command = command.encode() self.trimmed_command_length = 0 self.shell.send(self.last_command) - + async def read_output( self, timeout: float = 0, 
reset_full_output: bool = False ) -> Tuple[str, str]: diff --git a/python/helpers/strings.py b/python/helpers/strings.py index 1144b1f3b9..dac89c63e8 100644 --- a/python/helpers/strings.py +++ b/python/helpers/strings.py @@ -168,6 +168,7 @@ def _repl(match): path = match.group(1) try: # read file content + path = files.fix_dev_path(path) return files.read_file(path) except Exception: # if file not readable keep original placeholder diff --git a/python/helpers/subagents.py b/python/helpers/subagents.py new file mode 100644 index 0000000000..66f320f001 --- /dev/null +++ b/python/helpers/subagents.py @@ -0,0 +1,308 @@ +from python.helpers import files +from typing import TypedDict, TYPE_CHECKING +from pydantic import BaseModel, model_validator +import json +from typing import Literal + +GLOBAL_DIR = "." +USER_DIR = "usr" +DEFAULT_AGENTS_DIR = "agents" +USER_AGENTS_DIR = "usr/agents" + +type Origin = Literal["default", "user", "project"] + +if TYPE_CHECKING: + from agent import Agent + + +class SubAgentListItem(BaseModel): + name: str = "" + title: str = "" + description: str = "" + context: str = "" + origin: list[Origin] = [] + enabled: bool = True + + @model_validator(mode="after") + def post_validator(self): + if self.title == "": + self.title = self.name + return self + + +class SubAgent(SubAgentListItem): + prompts: dict[str, str] = {} + + +def get_agents_list(project_name: str | None = None) -> list[SubAgentListItem]: + return list(get_agents_dict(project_name).values()) + + +def get_agents_dict( + project_name: str | None = None, +) -> dict[str, SubAgentListItem]: + def _merge_agent_dicts( + base: dict[str, SubAgentListItem], + overrides: dict[str, SubAgentListItem], + ) -> dict[str, SubAgentListItem]: + merged: dict[str, SubAgentListItem] = dict(base) + for name, override in overrides.items(): + base_agent = merged.get(name) + merged[name] = ( + _merge_agent_list_items(base_agent, override) + if base_agent + else override + ) + return merged + + # load default and custom agents and merge + default_agents = _get_agents_list_from_dir(DEFAULT_AGENTS_DIR, origin="default") + custom_agents = _get_agents_list_from_dir(USER_AGENTS_DIR, origin="user") + merged = _merge_agent_dicts(default_agents, custom_agents) + + # merge with project agents if possible + if project_name: + from python.helpers import projects + + project_agents_dir = projects.get_project_meta_folder(project_name, "agents") + project_agents = _get_agents_list_from_dir(project_agents_dir, origin="project") + merged = _merge_agent_dicts(merged, project_agents) + + return merged + + +def _get_agents_list_from_dir(dir: str, origin: Origin) -> dict[str, SubAgentListItem]: + result: dict[str, SubAgentListItem] = {} + subdirs = files.get_subdirectories(dir) + + for subdir in subdirs: + try: + agent_json = files.read_file(files.get_abs_path(dir, subdir, "agent.json")) + agent_data = SubAgentListItem.model_validate_json(agent_json) + name = agent_data.name or subdir + agent_data.name = name + agent_data.origin = [origin] + result[name] = agent_data + except Exception: + continue + + return result + + +def load_agent_data(name: str, project_name: str | None = None) -> SubAgent: + def _merge_agent( + original: SubAgent | None, override: SubAgent | None = None + ) -> SubAgent | None: + if original and override: + return _merge_agents(original, override) + elif original: + return original + return override + + # load default and user agents and merge + default_agent = _load_agent_data_from_dir( + DEFAULT_AGENTS_DIR, name, origin="default" 
+ ) + user_agent = _load_agent_data_from_dir(USER_AGENTS_DIR, name, origin="user") + merged = _merge_agent(default_agent, user_agent) + + # merge with project agent if possible + if project_name: + from python.helpers import projects + + project_agents_dir = projects.get_project_meta_folder(project_name, "agents") + project_agent = _load_agent_data_from_dir( + project_agents_dir, name, origin="project" + ) + merged = _merge_agent(merged, project_agent) + + if merged is None: + raise FileNotFoundError( + f"Agent '{name}' not found in default or custom directories" + ) + + return merged + + +def save_agent_data(name: str, subagent: SubAgent) -> None: + # write agent.json in custom directory + agent_dir = f"{USER_AGENTS_DIR}/{name}" + agent_json = { + "title": subagent.title, + "description": subagent.description, + "context": subagent.context, + "enabled": subagent.enabled, + } + files.write_file(f"{agent_dir}/agent.json", json.dumps(agent_json, indent=2)) + + # replace prompts in custom directory + prompts_dir = f"{agent_dir}/prompts" + # clear existing custom prompts directory (if any) + files.delete_dir(prompts_dir) + + prompts = subagent.prompts or {} + for name, content in prompts.items(): + safe_name = files.safe_file_name(name) + if not safe_name.endswith(".md"): + safe_name += ".md" + files.write_file(f"{prompts_dir}/{safe_name}", content) + + +def delete_agent_data(name: str) -> None: + files.delete_dir(f"{USER_AGENTS_DIR}/{name}") + + +def _load_agent_data_from_dir(dir: str, name: str, origin: Origin) -> SubAgent | None: + try: + subagent_json = files.read_file(files.get_abs_path(dir, name, "agent.json")) + subagent = SubAgent.model_validate_json(subagent_json) + except Exception: + # backward compatibility (before agent.json existed) + try: + context_file = files.read_file(files.get_abs_path(dir, name, "_context.md")) + except Exception: + context_file = "" + subagent = SubAgent( + name=name, + title=name, + description="", + context=context_file, + origin=[origin], + prompts={}, + ) + + # non-stored fields + subagent.name = name + subagent.origin = [origin] + + prompts_dir = f"{dir}/{name}/prompts" + try: + prompts = files.read_text_files_in_dir(prompts_dir, pattern="*.md") + except Exception: + prompts = {} + + subagent.prompts = prompts or {} + return subagent + + +def _merge_agents(base: SubAgent | None, override: SubAgent | None) -> SubAgent | None: + if base is None: + return override + if override is None: + return base + + merged_prompts: dict[str, str] = {} + merged_prompts.update(base.prompts or {}) + merged_prompts.update(override.prompts or {}) + + return SubAgent( + name=override.name, + title=override.title, + description=override.description, + context=override.context, + origin=_merge_origins(base.origin, override.origin), + prompts=merged_prompts, + ) + + +def _merge_agent_list_items( + base: SubAgentListItem, override: SubAgentListItem +) -> SubAgentListItem: + return SubAgentListItem( + name=override.name or base.name, + title=override.title or base.title, + description=override.description or base.description, + context=override.context or base.context, + origin=_merge_origins(base.origin, override.origin), + ) + + +def _merge_origins(base: list[Origin], override: list[Origin]) -> list[Origin]: + return base + override + + +def get_default_promp_file_names() -> list[str]: + return files.list_files("prompts", filter="*.md") + + +def get_available_agents_dict( + project_name: str | None, +) -> dict[str, SubAgentListItem]: + # all available agents + all_agents = 
get_agents_dict() + # filter by project settings + from python.helpers import projects + + project_settings = ( + projects.load_project_subagents(project_name) if project_name else {} + ) + + filtered_agents: dict[str, SubAgentListItem] = {} + for name, agent in all_agents.items(): + if name in project_settings: + agent.enabled = project_settings[name]["enabled"] + if agent.enabled: + filtered_agents[name] = agent + return filtered_agents + + +def get_paths( + agent: "Agent|None", + *subpaths, + must_exist_completely: bool = True, + include_project: bool = True, + include_user: bool = True, + include_default: bool = True, + default_root: str = "", +) -> list[str]: + """Returns list of file paths for the given agent and subpaths, searched in order of priority: + project/agents/, project/, usr/agents/, agents/, usr/, default.""" + paths: list[str] = [] + check_subpaths = subpaths if must_exist_completely else [] + profile_name = agent.config.profile if agent and agent.config.profile else "" + project_name = "" + + if include_project and agent: + from python.helpers import projects + + project_name = projects.get_context_project_name(agent.context) or "" + + if project_name and profile_name: + # project/agents//... + project_agent_dir = projects.get_project_meta_folder( + project_name, "agents", profile_name + ) + if files.exists(files.get_abs_path(project_agent_dir, *check_subpaths)): + paths.append(files.get_abs_path(project_agent_dir, *subpaths)) + + if project_name: + # project/.a0proj/... + path = projects.get_project_meta_folder(project_name, *subpaths) + if (not must_exist_completely) or files.exists(path): + paths.append(path) + + if profile_name: + + # usr/agents//... + path = files.get_abs_path(USER_AGENTS_DIR, profile_name, *subpaths) + if (not must_exist_completely) or files.exists(files.get_abs_path(USER_AGENTS_DIR, profile_name, *check_subpaths)): + paths.append(path) + + # agents//... + path = files.get_abs_path(DEFAULT_AGENTS_DIR, profile_name, *subpaths) + if (not must_exist_completely) or files.exists(files.get_abs_path(DEFAULT_AGENTS_DIR, profile_name, *check_subpaths)): + paths.append(path) + + if include_user: + # usr/... + path = files.get_abs_path(USER_DIR, *subpaths) + if (not must_exist_completely) or files.exists(path): + paths.append(path) + + if include_default: + # default_root/... 
+ path = files.get_abs_path(default_root, *subpaths) + if (not must_exist_completely) or files.exists(path): + paths.append(path) + + return paths diff --git a/python/helpers/task_scheduler.py b/python/helpers/task_scheduler.py index 9bfabcece6..1938db367e 100644 --- a/python/helpers/task_scheduler.py +++ b/python/helpers/task_scheduler.py @@ -22,6 +22,7 @@ from python.helpers.defer import DeferredTask from python.helpers.files import get_abs_path, make_dirs, read_file, write_file from python.helpers.localization import Localization +from python.helpers import projects, guids import pytz from typing import Annotated @@ -117,13 +118,15 @@ def should_launch(self) -> datetime | None: class BaseTask(BaseModel): - uuid: str = Field(default_factory=lambda: str(uuid.uuid4())) + uuid: str = Field(default_factory=lambda: guids.generate_id()) context_id: Optional[str] = Field(default=None) state: TaskState = Field(default=TaskState.IDLE) name: str = Field() system_prompt: str prompt: str attachments: list[str] = Field(default_factory=list) + project_name: str | None = Field(default=None) + project_color: str | None = Field(default=None) created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) last_run: datetime | None = None @@ -181,6 +184,9 @@ def check_schedule(self, frequency_seconds: float = 60.0) -> bool: def get_next_run(self) -> datetime | None: return None + def is_dedicated(self) -> bool: + return self.context_id == self.uuid + def get_next_run_minutes(self) -> int | None: next_run = self.get_next_run() if next_run is None: @@ -209,7 +215,7 @@ async def on_error(self, error: str): last_result=f"ERROR: {error}" ) if not updated_task: - PrintStyle(italic=True, font_color="red", padding=False).print( + PrintStyle.error( f"Failed to update task {self.uuid} state to ERROR after error: {error}" ) await scheduler.save() # Force save after update @@ -225,7 +231,7 @@ async def on_success(self, result: str): last_result=result ) if not updated_task: - PrintStyle(italic=True, font_color="red", padding=False).print( + PrintStyle.error( f"Failed to update task {self.uuid} state to IDLE after success" ) await scheduler.save() # Force save after update @@ -243,14 +249,18 @@ def create( prompt: str, token: str, attachments: list[str] = list(), - context_id: str | None = None + context_id: str | None = None, + project_name: str | None = None, + project_color: str | None = None ): return cls(name=name, system_prompt=system_prompt, prompt=prompt, attachments=attachments, token=token, - context_id=context_id) + context_id=context_id, + project_name=project_name, + project_color=project_color) def update(self, name: str | None = None, @@ -288,7 +298,9 @@ def create( schedule: TaskSchedule, attachments: list[str] = list(), context_id: str | None = None, - timezone: str | None = None + timezone: str | None = None, + project_name: str | None = None, + project_color: str | None = None, ): # Set timezone in schedule if provided if timezone is not None: @@ -301,7 +313,9 @@ def create( prompt=prompt, attachments=attachments, schedule=schedule, - context_id=context_id) + context_id=context_id, + project_name=project_name, + project_color=project_color) def update(self, name: str | None = None, @@ -365,14 +379,18 @@ def create( prompt: str, plan: TaskPlan, attachments: list[str] = list(), - context_id: str | None = None + context_id: str | None = None, + project_name: str | None = None, + project_color: str | None = 
None ): return cls(name=name, system_prompt=system_prompt, prompt=prompt, plan=plan, attachments=attachments, - context_id=context_id) + context_id=context_id, + project_name=project_name, + project_color=project_color) def update(self, name: str | None = None, @@ -486,12 +504,12 @@ async def save(self) -> "SchedulerTaskList": for task in self.tasks: if isinstance(task, AdHocTask): if task.token is None or task.token == "": - PrintStyle(italic=True, font_color="red", padding=False).print( + PrintStyle.warning( f"WARNING: AdHocTask {task.name} ({task.uuid}) has a null or empty token before saving: '{task.token}'" ) # Generate a new token to prevent errors task.token = str(random.randint(1000000000000000000, 9999999999999999999)) - PrintStyle(italic=True, font_color="red", padding=False).print( + PrintStyle.info( f"Fixed: Generated new token '{task.token}' for task {task.name}" ) @@ -504,7 +522,7 @@ async def save(self) -> "SchedulerTaskList": # Debug: check if 'null' appears as token value in JSON if '"type": "adhoc"' in json_data and '"token": null' in json_data: - PrintStyle(italic=True, font_color="red", padding=False).print( + PrintStyle.error( "ERROR: Found null token in JSON output for an adhoc task" ) @@ -514,7 +532,7 @@ async def save(self) -> "SchedulerTaskList": if exists(path): loaded_json = read_file(path) if '"type": "adhoc"' in loaded_json and '"token": null' in loaded_json: - PrintStyle(italic=True, font_color="red", padding=False).print( + PrintStyle.error( "ERROR: Null token persisted in JSON file for an adhoc task" ) @@ -601,6 +619,8 @@ class TaskScheduler: _tasks: SchedulerTaskList _printer: PrintStyle _instance = None + _running_deferred_tasks: Dict[str, DeferredTask] + _running_tasks_lock: threading.RLock @classmethod def get(cls) -> "TaskScheduler": @@ -613,8 +633,38 @@ def __init__(self): if not hasattr(self, '_initialized'): self._tasks = SchedulerTaskList.get() self._printer = PrintStyle(italic=True, font_color="green", padding=False) + self._running_deferred_tasks = {} + self._running_tasks_lock = threading.RLock() self._initialized = True + def _register_running_task(self, task_uuid: str, deferred_task: DeferredTask) -> None: + with self._running_tasks_lock: + self._running_deferred_tasks[task_uuid] = deferred_task + + def _unregister_running_task(self, task_uuid: str) -> None: + with self._running_tasks_lock: + self._running_deferred_tasks.pop(task_uuid, None) + + def cancel_running_task(self, task_uuid: str, terminate_thread: bool = False) -> bool: + with self._running_tasks_lock: + deferred_task = self._running_deferred_tasks.get(task_uuid) + if not deferred_task: + return False + PrintStyle.info(f"Scheduler cancelling task {task_uuid}") + deferred_task.kill(terminate_thread=terminate_thread) + return True + + def cancel_tasks_by_context(self, context_id: str, terminate_thread: bool = False) -> bool: + cancelled_any = False + with self._running_tasks_lock: + running_tasks = list(self._running_deferred_tasks.keys()) + for task_uuid in running_tasks: + task = self.get_task_by_uuid(task_uuid) + if task and task.context_id == context_id: + if self.cancel_running_task(task_uuid, terminate_thread=terminate_thread): + cancelled_any = True + return cancelled_any + async def reload(self): await self._tasks.reload() @@ -669,7 +719,7 @@ async def run_task_by_uuid(self, task_uuid: str, task_context: str | None = None # If the task is in error state, reset it to IDLE first if task.state == TaskState.ERROR: - self._printer.print(f"Resetting task '{task.name}' from ERROR to 
IDLE state before running") + PrintStyle.info(f"Resetting task '{task.name}' from ERROR to IDLE state before running") await self.update_task(task_uuid, state=TaskState.IDLE) # Force a reload to ensure we have the updated state await self._tasks.reload() @@ -719,6 +769,10 @@ async def __new_context(self, task: Union[ScheduledTask, AdHocTask, PlannedTask] # initial name before renaming is same as task name # context.name = task.name + # Activate project if set + if task.project_name: + projects.activate_project(context.id, task.project_name) + # Save the context save_tmp_chat(context) return context @@ -728,13 +782,13 @@ async def _get_chat_context(self, task: Union[ScheduledTask, AdHocTask, PlannedT if context: assert isinstance(context, AgentContext) - self._printer.print( + PrintStyle.info( f"Scheduler Task {task.name} loaded from task {task.uuid}, context ok" ) save_tmp_chat(context) return context else: - self._printer.print( + PrintStyle.warning( f"Scheduler Task {task.name} loaded from task {task.uuid} but context not found" ) return await self.__new_context(task) @@ -751,20 +805,24 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None): # preflight checks with a snapshot of the task task_snapshot: Union[ScheduledTask, AdHocTask, PlannedTask] | None = self.get_task_by_uuid(task_uuid) if task_snapshot is None: - self._printer.print(f"Scheduler Task with UUID '{task_uuid}' not found") + PrintStyle.error(f"Scheduler Task with UUID '{task_uuid}' not found") + self._unregister_running_task(task_uuid) return if task_snapshot.state == TaskState.RUNNING: - self._printer.print(f"Scheduler Task '{task_snapshot.name}' already running, skipping") + PrintStyle.warning(f"Scheduler Task '{task_snapshot.name}' already running, skipping") + self._unregister_running_task(task_uuid) return # Atomically fetch and check the task's current state current_task = await self.update_task_checked(task_uuid, lambda task: task.state != TaskState.RUNNING, state=TaskState.RUNNING) if not current_task: - self._printer.print(f"Scheduler Task with UUID '{task_uuid}' not found or updated by another process") + PrintStyle.error(f"Scheduler Task with UUID '{task_uuid}' not found or updated by another process") + self._unregister_running_task(task_uuid) return if current_task.state != TaskState.RUNNING: # This means the update failed due to state conflict - self._printer.print(f"Scheduler Task '{current_task.name}' state is '{current_task.state}', skipping") + PrintStyle.warning(f"Scheduler Task '{current_task.name}' state is '{current_task.state}', skipping") + self._unregister_running_task(task_uuid) return await current_task.on_run() @@ -773,9 +831,10 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None): agent = None try: - self._printer.print(f"Scheduler Task '{current_task.name}' started") + PrintStyle.info(f"Scheduler Task '{current_task.name}' started") context = await self._get_chat_context(current_task) + AgentContext.use(context.id) # Ensure the context is properly registered in the AgentContext._contexts # This is critical for the polling mechanism to find and stream logs @@ -795,9 +854,9 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None): if url.scheme in ["http", "https", "ftp", "ftps", "sftp"]: attachment_filenames.append(attachment) else: - self._printer.print(f"Skipping attachment: [{attachment}]") + PrintStyle.warning(f"Skipping attachment: [{attachment}]") except Exception: - self._printer.print(f"Skipping attachment: 
[{attachment}]") + PrintStyle.warning(f"Skipping attachment: [{attachment}]") self._printer.print("User message:") self._printer.print(f"> {current_task.prompt}") @@ -834,7 +893,7 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None): result = await agent.monologue() # Success - self._printer.print(f"Scheduler Task '{current_task.name}' completed: {result}") + PrintStyle.success(f"Scheduler Task '{current_task.name}' completed: {result}") await self._persist_chat(current_task, context) await current_task.on_success(result) @@ -842,36 +901,57 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None): await self._tasks.reload() updated_task = self.get_task_by_uuid(task_uuid) if updated_task and updated_task.state != TaskState.IDLE: - self._printer.print(f"Fixing task state consistency: '{current_task.name}' state is not IDLE after success") + PrintStyle.warning(f"Fixing task state consistency: '{current_task.name}' state is not IDLE after success") await self.update_task(task_uuid, state=TaskState.IDLE) + except asyncio.CancelledError: + PrintStyle.warning(f"Scheduler Task '{current_task.name}' cancelled by user") + try: + await asyncio.shield(self.update_task(task_uuid, state=TaskState.IDLE)) + except Exception: + pass + raise except Exception as e: # Error - self._printer.print(f"Scheduler Task '{current_task.name}' failed: {e}") + PrintStyle.error(f"Scheduler Task '{current_task.name}' failed: {e}") await current_task.on_error(str(e)) # Explicitly verify task was updated in storage after error await self._tasks.reload() updated_task = self.get_task_by_uuid(task_uuid) if updated_task and updated_task.state != TaskState.ERROR: - self._printer.print(f"Fixing task state consistency: '{current_task.name}' state is not ERROR after failure") + PrintStyle.warning(f"Fixing task state consistency: '{current_task.name}' state is not ERROR after failure") await self.update_task(task_uuid, state=TaskState.ERROR) if agent: agent.handle_critical_exception(e) finally: # Call on_finish for task-specific cleanup - await current_task.on_finish() + try: + await asyncio.shield(current_task.on_finish()) + except asyncio.CancelledError: + pass + except Exception: + pass # Make one final save to ensure all states are persisted - await self._tasks.save() + try: + await asyncio.shield(self._tasks.save()) + except asyncio.CancelledError: + pass + except Exception: + pass + + self._unregister_running_task(task_uuid) deferred_task = DeferredTask(thread_name=self.__class__.__name__) + self._register_running_task(task.uuid, deferred_task) deferred_task.start_task(_run_task_wrapper, task.uuid, task_context) - # Ensure background execution doesn't exit immediately on async await, especially in script contexts - # This helps prevent premature exits when running from non-event-loop contexts - asyncio.create_task(asyncio.sleep(0.1)) + # Ensure background execution doesn't exit immediately on async await, especially in script contexts. + # Yielding briefly keeps callers like CLI scripts alive long enough for the DeferredTask thread to spin up + # without leaving stray pending tasks that trigger \"Task was destroyed\" warnings when the loop shuts down. 
+ await asyncio.sleep(0.1) def serialize_all_tasks(self) -> list[Dict[str, Any]]: """ @@ -1014,7 +1094,7 @@ def parse_task_plan(plan_data: Dict[str, Any]) -> TaskPlan: done=done_dates_cast ) except Exception as e: - PrintStyle(italic=True, font_color="red", padding=False).print( + PrintStyle.error( f"Error parsing task plan: {e}" ) # Return empty plan instead of failing @@ -1036,12 +1116,19 @@ def serialize_task(task: Union[ScheduledTask, AdHocTask, PlannedTask]) -> Dict[s "system_prompt": task.system_prompt, "prompt": task.prompt, "attachments": task.attachments, + "project_name": task.project_name, + "project_color": task.project_color, "created_at": serialize_datetime(task.created_at), "updated_at": serialize_datetime(task.updated_at), "last_run": serialize_datetime(task.last_run), "next_run": serialize_datetime(task.get_next_run()), "last_result": task.last_result, - "context_id": task.context_id + "context_id": task.context_id, + "dedicated_context": task.is_dedicated(), + "project": { + "name": task.project_name, + "color": task.project_color, + }, } # Add type-specific fields @@ -1101,11 +1188,13 @@ def deserialize_task(task_data: Dict[str, Any], task_class: Optional[Type[T]] = "system_prompt": task_data.get("system_prompt", ""), "prompt": task_data.get("prompt", ""), "attachments": task_data.get("attachments", []), + "project_name": task_data.get("project_name"), + "project_color": task_data.get("project_color"), "created_at": parse_datetime(task_data.get("created_at")), "updated_at": parse_datetime(task_data.get("updated_at")), "last_run": parse_datetime(task_data.get("last_run")), "last_result": task_data.get("last_result"), - "context_id": task_data.get("context_id") + "context_id": task_data.get("context_id"), } # Add type-specific fields diff --git a/python/helpers/tokens.py b/python/helpers/tokens.py index 622c6d5be1..42224a495c 100644 --- a/python/helpers/tokens.py +++ b/python/helpers/tokens.py @@ -13,7 +13,7 @@ def count_tokens(text: str, encoding_name="cl100k_base") -> int: encoding = tiktoken.get_encoding(encoding_name) # Encode the text and count the tokens - tokens = encoding.encode(text) + tokens = encoding.encode(text, disallowed_special=()) token_count = len(tokens) return token_count diff --git a/python/helpers/tool.py b/python/helpers/tool.py index 5825958efd..2613f1c5d9 100644 --- a/python/helpers/tool.py +++ b/python/helpers/tool.py @@ -22,11 +22,20 @@ def __init__(self, agent: Agent, name: str, method: str | None, args: dict[str,s self.args = args self.loop_data = loop_data self.message = message + self.progress: str = "" @abstractmethod async def execute(self,**kwargs) -> Response: pass + def set_progress(self, content: str | None): + self.progress = content or "" + + def add_progress(self, content: str | None): + if not content: + return + self.progress += content + async def before_execution(self, **kwargs): PrintStyle(font_color="#1B4F72", padding=True, background_color="white", bold=True).print(f"{self.agent.agent_name}: Using tool '{self.name}'") self.log = self.get_log_object() diff --git a/python/helpers/tty_session.py b/python/helpers/tty_session.py index 68b546f67e..d6a3f91a60 100644 --- a/python/helpers/tty_session.py +++ b/python/helpers/tty_session.py @@ -204,53 +204,66 @@ async def drain(self): async def _spawn_winpty(cmd, cwd, env, echo): - # A quick way to silence command echo in cmd.exe is /Q (quiet) - if not echo and cmd.strip().lower().startswith("cmd") and "/q" not in cmd.lower(): - cmd = cmd.replace("cmd.exe", "cmd.exe /Q") + # Clean 
PowerShell startup: no logo, no profile, bypass execution policy for deterministic behavior + if cmd.strip().lower().startswith("powershell"): + if "-nolog" not in cmd.lower(): + cmd = cmd.replace("powershell.exe", "powershell.exe -NoLogo -NoProfile -ExecutionPolicy Bypass", 1) cols, rows = 80, 25 - pty = winpty.PTY(cols, rows) # type: ignore - child = pty.spawn(cmd, cwd=cwd or os.getcwd(), env=env) - - master_r_fd = msvcrt.open_osfhandle(child.conout_pipe, os.O_RDONLY) # type: ignore - master_w_fd = msvcrt.open_osfhandle(child.conin_pipe, 0) # type: ignore + child = winpty.PtyProcess.spawn(cmd, dimensions=(rows, cols), cwd=cwd or os.getcwd(), env=env) # type: ignore loop = asyncio.get_running_loop() reader = asyncio.StreamReader() - def _on_data(): - try: - data = os.read(master_r_fd, 1 << 16) - except OSError: - data = b"" - if data: - reader.feed_data(data) - else: - reader.feed_eof() - loop.remove_reader(master_r_fd) + async def _on_data(): + while child.isalive(): + try: + # Run blocking read in executor to not block event loop + data = await loop.run_in_executor(None, child.read, 1 << 16) + if data: + reader.feed_data(data.encode('utf-8') if isinstance(data, str) else data) + except EOFError: + break + except Exception: + await asyncio.sleep(0.01) + reader.feed_eof() - loop.add_reader(master_r_fd, _on_data) + # Start pumping output in background + asyncio.create_task(_on_data()) class _Stdin: def write(self, d): - os.write(master_w_fd, d) + # Use winpty's write method, not os.write + if isinstance(d, bytes): + d = d.decode('utf-8', errors='replace') + # Windows needs \r\n for proper line endings + if _IS_WIN: + d = d.replace('\n', '\r\n') + child.write(d) async def drain(self): - await asyncio.sleep(0) + await asyncio.sleep(0.01) # Give write time to complete - class _Proc(asyncio.subprocess.Process): + class _Proc: def __init__(self): self.stdin = _Stdin() # type: ignore self.stdout = reader self.pid = child.pid + self.returncode = None async def wait(self): while child.isalive(): await asyncio.sleep(0.2) + self.returncode = 0 return 0 + def terminate(self): + if child.isalive(): + child.terminate() + def kill(self): - child.kill() + if child.isalive(): + child.kill() return _Proc() @@ -259,7 +272,7 @@ def kill(self): if __name__ == "__main__": async def interactive_shell(): - shell_cmd, prompt_hint = ("cmd.exe", "$") if _IS_WIN else ("/bin/bash", "$") + shell_cmd, prompt_hint = ("powershell.exe", ">") if _IS_WIN else ("/bin/bash", "$") # echo=False β†’ suppress the shell’s own echo of commands term = TTYSession(shell_cmd) diff --git a/python/helpers/tunnel_manager.py b/python/helpers/tunnel_manager.py index 93e8ec635c..42a1d87cd0 100644 --- a/python/helpers/tunnel_manager.py +++ b/python/helpers/tunnel_manager.py @@ -1,6 +1,13 @@ -from flaredantic import FlareTunnel, FlareConfig, ServeoConfig, ServeoTunnel +from flaredantic import ( + FlareTunnel, FlareConfig, + ServeoConfig, ServeoTunnel, + MicrosoftTunnel, MicrosoftConfig, + notifier, NotifyData, NotifyEvent +) import threading +from collections import deque +from python.helpers.print_style import PrintStyle # Singleton to manage the tunnel instance class TunnelManager: @@ -19,6 +26,35 @@ def __init__(self): self.tunnel_url = None self.is_running = False self.provider = None + self.notifications = deque(maxlen=50) + self._subscribed = False + + def _on_notify(self, data: NotifyData): + """Handle notifications from flaredantic""" + self.notifications.append({ + "event": data.event.value, + "message": data.message, + "data": 
data.data + }) + + def _ensure_subscribed(self): + """Subscribe to flaredantic notifications if not already""" + if not self._subscribed: + notifier.subscribe(self._on_notify) + self._subscribed = True + + def get_notifications(self): + """Get and clear pending notifications""" + notifications = list(self.notifications) + self.notifications.clear() + return notifications + + def get_last_error(self): + """Check for recent error in notifications without clearing""" + for n in reversed(list(self.notifications)): + if n['event'] == NotifyEvent.ERROR.value: + return n['message'] + return None def start_tunnel(self, port=80, provider="serveo"): """Start a new tunnel or return the existing one's URL""" @@ -26,6 +62,8 @@ def start_tunnel(self, port=80, provider="serveo"): return self.tunnel_url self.provider = provider + self._ensure_subscribed() + self.notifications.clear() try: # Start tunnel in a separate thread to avoid blocking @@ -34,6 +72,9 @@ def run_tunnel(): if self.provider == "cloudflared": config = FlareConfig(port=port, verbose=True) self.tunnel = FlareTunnel(config) + elif self.provider == "microsoft": + config = MicrosoftConfig(port=port, verbose=True) # type: ignore + self.tunnel = MicrosoftTunnel(config) else: # Default to serveo config = ServeoConfig(port=port) # type: ignore self.tunnel = ServeoTunnel(config) @@ -42,23 +83,34 @@ def run_tunnel(): self.tunnel_url = self.tunnel.tunnel_url self.is_running = True except Exception as e: - print(f"Error in tunnel thread: {str(e)}") + error_msg = str(e) + PrintStyle.error(f"Error in tunnel thread: {error_msg}") + self.notifications.append({ + "event": NotifyEvent.ERROR.value, + "message": error_msg, + "data": None + }) tunnel_thread = threading.Thread(target=run_tunnel) tunnel_thread.daemon = True tunnel_thread.start() - # Wait for tunnel to start (max 15 seconds instead of 5) - for _ in range(150): # Increased from 50 to 150 iterations + # Wait for tunnel to start (no timeout - user may need time for login) + import time + while True: if self.tunnel_url: break - import time - + # Check if we have errors + if any(n['event'] == NotifyEvent.ERROR.value for n in self.notifications): + break + # Check if thread died without producing URL + if not tunnel_thread.is_alive(): + break time.sleep(0.1) return self.tunnel_url except Exception as e: - print(f"Error starting tunnel: {str(e)}") + PrintStyle.error(f"Error starting tunnel: {str(e)}") return None def stop_tunnel(self): diff --git a/python/helpers/update_check.py b/python/helpers/update_check.py new file mode 100644 index 0000000000..ce083e1c9f --- /dev/null +++ b/python/helpers/update_check.py @@ -0,0 +1,15 @@ +from python.helpers import git, runtime +import hashlib + +async def check_version(): + import httpx + + current_version = git.get_version() + anonymized_id = hashlib.sha256(runtime.get_persistent_id().encode()).hexdigest()[:20] + + url = "https://api.agent-zero.ai/a0-update-check" + payload = {"current_version": current_version, "anonymized_id": anonymized_id} + async with httpx.AsyncClient() as client: + response = await client.post(url, json=payload) + version = response.json() + return version \ No newline at end of file diff --git a/python/helpers/vector_db.py b/python/helpers/vector_db.py index 20254685b8..8a813cabad 100644 --- a/python/helpers/vector_db.py +++ b/python/helpers/vector_db.py @@ -1,5 +1,4 @@ from typing import Any, List, Sequence -import uuid from langchain_community.vectorstores import FAISS # faiss needs to be patched for python 3.12 on arm #TODO remove 
once not needed @@ -14,8 +13,10 @@ DistanceStrategy, ) from langchain.embeddings import CacheBackedEmbeddings +from simpleeval import simple_eval from agent import Agent +from python.helpers import guids class MyFaiss(FAISS): @@ -98,7 +99,7 @@ async def search_by_metadata(self, filter: str, limit: int = 0) -> list[Document return result async def insert_documents(self, docs: list[Document]): - ids = [str(uuid.uuid4()) for _ in range(len(docs))] + ids = [guids.generate_id() for _ in range(len(docs))] if ids: for doc, id in zip(docs, ids): @@ -140,7 +141,7 @@ def cosine_normalizer(val: float) -> float: def get_comparator(condition: str): def comparator(data: dict[str, Any]): try: - result = eval(condition, {}, data) + result = simple_eval(condition, {}, data) return result except Exception as e: # PrintStyle.error(f"Error evaluating condition: {e}") diff --git a/python/helpers/wait.py b/python/helpers/wait.py new file mode 100644 index 0000000000..83f2886193 --- /dev/null +++ b/python/helpers/wait.py @@ -0,0 +1,68 @@ +import asyncio +from datetime import datetime, timezone + +from python.helpers.print_style import PrintStyle + + +def format_remaining_time(total_seconds: float) -> str: + if total_seconds < 0: + total_seconds = 0 + + days, remainder = divmod(total_seconds, 86400) + hours, remainder = divmod(remainder, 3600) + minutes, seconds = divmod(remainder, 60) + + days = int(days) + hours = int(hours) + minutes = int(minutes) + + parts = [] + if days > 0: + parts.append(f"{days}d") + if hours > 0: + parts.append(f"{hours}h") + if minutes > 0: + parts.append(f"{minutes}m") + + if days > 0 or hours > 0: + if seconds >= 1: + parts.append(f"{int(seconds)}s") + elif minutes > 0: + if seconds >= 0.1: + parts.append(f"{seconds:.1f}s") + else: + parts.append(f"{total_seconds:.1f}s") + + if not parts: + return "0.0s remaining" + + return " ".join(parts) + " remaining" + + +async def managed_wait(agent, target_time, is_duration_wait, log, get_heading_callback): + + while datetime.now(timezone.utc) < target_time: + before_intervention = datetime.now(timezone.utc) + await agent.handle_intervention() + after_intervention = datetime.now(timezone.utc) + + if is_duration_wait: + pause_duration = after_intervention - before_intervention + if pause_duration.total_seconds() > 1.5: # Adjust for pauses longer than the sleep cycle + target_time += pause_duration + PrintStyle.info( + f"Wait extended by {pause_duration.total_seconds():.1f}s to {target_time.isoformat()}...", + ) + + current_time = datetime.now(timezone.utc) + if current_time >= target_time: + break + + remaining_seconds = (target_time - current_time).total_seconds() + if log: + log.update(heading=get_heading_callback(format_remaining_time(remaining_seconds))) + sleep_duration = min(1.0, remaining_seconds) + + await asyncio.sleep(sleep_duration) + + return target_time diff --git a/python/tools/behaviour_adjustment.py b/python/tools/behaviour_adjustment.py index 735a39dfa4..31fe67cd70 100644 --- a/python/tools/behaviour_adjustment.py +++ b/python/tools/behaviour_adjustment.py @@ -52,13 +52,13 @@ async def log_callback(content): def get_custom_rules_file(agent: Agent): - return memory.get_memory_subdir_abs(agent) + f"/behaviour.md" + return files.get_abs_path(memory.get_memory_subdir_abs(agent), "behaviour.md") def read_rules(agent: Agent): rules_file = get_custom_rules_file(agent) if files.exists(rules_file): - rules = files.read_prompt_file(rules_file) + rules = agent.read_prompt(rules_file) return agent.read_prompt("agent.system.behaviour.md", 
rules=rules) else: rules = agent.read_prompt("agent.system.behaviour_default.md") diff --git a/python/tools/browser_agent.py b/python/tools/browser_agent.py index 949543a522..6d5f085b26 100644 --- a/python/tools/browser_agent.py +++ b/python/tools/browser_agent.py @@ -9,7 +9,7 @@ from python.helpers.browser_use import browser_use # type: ignore[attr-defined] from python.helpers.print_style import PrintStyle from python.helpers.playwright import ensure_playwright_binary -from python.helpers.secrets import SecretsManager +from python.helpers.secrets import get_secrets_manager from python.extensions.message_loop_start._10_iteration_no import get_iter_no from pydantic import BaseModel import uuid @@ -153,7 +153,7 @@ async def complete_task(params: DoneResult): try: - secrets_manager = SecretsManager.get_instance() + secrets_manager = get_secrets_manager(self.agent.context) secrets_dict = secrets_manager.load_secrets() self.use_agent = browser_use.Agent( @@ -216,7 +216,7 @@ async def execute(self, message="", reset="", **kwargs): self.guid = self.agent.context.generate_id() # short random id reset = str(reset).lower().strip() == "true" await self.prepare_state(reset=reset) - message = SecretsManager.get_instance().mask_values(message, placeholder="{key}") # mask any potential passwords passed from A0 to browser-use to browser-use format + message = get_secrets_manager(self.agent.context).mask_values(message, placeholder="{key}") # mask any potential passwords passed from A0 to browser-use to browser-use format task = self.state.start_task(message) if self.state else None # wait for browser agent to finish and update progress with timeout @@ -394,7 +394,7 @@ def update_progress(self, text): def _mask(self, text: str) -> str: try: - return SecretsManager.get_instance().mask_values(text or "") + return get_secrets_manager(self.agent.context).mask_values(text or "") except Exception as e: return text or "" diff --git a/python/tools/code_execution_tool.py b/python/tools/code_execution_tool.py index 0b0a054167..a37056057b 100644 --- a/python/tools/code_execution_tool.py +++ b/python/tools/code_execution_tool.py @@ -3,7 +3,7 @@ import shlex import time from python.helpers.tool import Tool, Response -from python.helpers import files, rfc_exchange +from python.helpers import files, rfc_exchange, projects, runtime from python.helpers.print_style import PrintStyle from python.helpers.shell_local import LocalInteractiveSession from python.helpers.shell_ssh import SSHInteractiveSession @@ -12,21 +12,58 @@ from python.helpers.messages import truncate_text as truncate_text_agent import re +# Timeouts for python, nodejs, and terminal runtimes. +CODE_EXEC_TIMEOUTS: dict[str, int] = { + "first_output_timeout": 30, + "between_output_timeout": 15, + "max_exec_timeout": 180, + "dialog_timeout": 5, +} + +# Timeouts for output runtime. 
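+# Used when runtime == "output": the tool only polls an already running session for new
+# output, so these limits are longer than CODE_EXEC_TIMEOUTS above.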
+OUTPUT_TIMEOUTS: dict[str, int] = { + "first_output_timeout": 90, + "between_output_timeout": 45, + "max_exec_timeout": 300, + "dialog_timeout": 5, +} + +@dataclass +class ShellWrap: + id: int + session: LocalInteractiveSession | SSHInteractiveSession + running: bool @dataclass class State: ssh_enabled: bool - shells: dict[int, LocalInteractiveSession | SSHInteractiveSession] + shells: dict[int, ShellWrap] class CodeExecution(Tool): - async def execute(self, **kwargs): + # Common shell prompt regex patterns (add more as needed) + prompt_patterns = [ + re.compile(r"\\(venv\\).+[$#] ?$"), # (venv) ...$ or (venv) ...# + re.compile(r"root@[^:]+:[^#]+# ?$"), # root@container:~# + re.compile(r"[a-zA-Z0-9_.-]+@[^:]+:[^$#]+[$#] ?$"), # user@host:~$ + re.compile(r"\(?.*\)?\s*PS\s+[^>]+> ?$"), # PowerShell prompt like (base) PS C:\...> + ] + # potential dialog detection + dialog_patterns = [ + re.compile(r"Y/N", re.IGNORECASE), # Y/N anywhere in line + re.compile(r"yes/no", re.IGNORECASE), # yes/no anywhere in line + re.compile(r":\s*$"), # line ending with colon + re.compile(r"\?\s*$"), # line ending with question mark + ] + + async def execute(self, **kwargs) -> Response: await self.agent.handle_intervention() # wait for intervention and handle it, if paused runtime = self.args.get("runtime", "").lower().strip() session = int(self.args.get("session", 0)) + self.allow_running = bool(self.args.get("allow_running", False)) if runtime == "python": response = await self.execute_python_code( @@ -42,7 +79,7 @@ async def execute(self, **kwargs): ) elif runtime == "output": response = await self.get_terminal_output( - session=session, first_output_timeout=60, between_output_timeout=5 + session=session, timeouts=OUTPUT_TIMEOUTS ) elif runtime == "reset": response = await self.reset_terminal(session=session) @@ -81,18 +118,18 @@ async def prepare_state(self, reset=False, session: int | None = None): # always reset state when ssh_enabled changes if not self.state or self.state.ssh_enabled != self.agent.config.code_exec_ssh_enabled: # initialize shells dictionary if not exists - shells: dict[int, LocalInteractiveSession | SSHInteractiveSession] = {} + shells: dict[int, ShellWrap] = {} else: shells = self.state.shells.copy() # Only reset the specified session if provided if reset and session is not None and session in shells: - await shells[session].close() + await shells[session].session.close() del shells[session] elif reset and not session: # Close all sessions if full reset requested for s in list(shells.keys()): - await shells[s].close() + await shells[s].session.close() shells = {} # initialize local or remote interactive shell interface for session 0 if needed @@ -109,11 +146,12 @@ async def prepare_state(self, reset=False, session: int | None = None): self.agent.config.code_exec_ssh_port, self.agent.config.code_exec_ssh_user, pswd, + cwd=self.get_cwd(), ) else: - shell = LocalInteractiveSession() + shell = LocalInteractiveSession(cwd=self.get_cwd()) - shells[session] = shell + shells[session] = ShellWrap(id=session, session=shell, running=False) await shell.connect() self.state = State(shells=shells, ssh_enabled=self.agent.config.code_exec_ssh_enabled) @@ -135,28 +173,35 @@ async def execute_nodejs_code(self, session: int, code: str, reset: bool = False async def execute_terminal_command( self, session: int, command: str, reset: bool = False ): - prefix = "bash> " + self.format_command_for_output(command) + "\n\n" + prefix = ("bash>" if not runtime.is_windows() or 
self.agent.config.code_exec_ssh_enabled else "PS>") + self.format_command_for_output(command) + "\n\n" return await self.terminal_session(session, command, reset, prefix) async def terminal_session( - self, session: int, command: str, reset: bool = False, prefix: str = "" + self, session: int, command: str, reset: bool = False, prefix: str = "", timeouts: dict | None = None ): self.state = await self.prepare_state(reset=reset, session=session) await self.agent.handle_intervention() # wait for intervention and handle it, if paused + + # Check if session is running and handle it + if not self.allow_running: + if response := await self.handle_running_session(session): + return response + # try again on lost connection for i in range(2): try: - await self.state.shells[session].send_command(command) + self.state.shells[session].running = True + await self.state.shells[session].session.send_command(command) locl = ( " (local)" - if isinstance(self.state.shells[session], LocalInteractiveSession) + if isinstance(self.state.shells[session].session, LocalInteractiveSession) else ( " (remote)" - if isinstance(self.state.shells[session], SSHInteractiveSession) + if isinstance(self.state.shells[session].session, SSHInteractiveSession) else " (unknown)" ) ) @@ -164,7 +209,7 @@ async def terminal_session( PrintStyle( background_color="white", font_color="#1B4F72", bold=True ).print(f"{self.agent.agent_name} code execution output{locl}") - return await self.get_terminal_output(session=session, prefix=prefix) + return await self.get_terminal_output(session=session, prefix=prefix, timeouts=(timeouts or CODE_EXEC_TIMEOUTS)) except Exception as e: if i == 1: @@ -196,26 +241,18 @@ async def get_terminal_output( max_exec_timeout=180, # hard cap on total runtime sleep_time=0.1, prefix="", + timeouts: dict | None = None, ): # if not self.state: self.state = await self.prepare_state(session=session) - # Common shell prompt regex patterns (add more as needed) - prompt_patterns = [ - re.compile(r"\(venv\).+[$#] ?$"), # (venv) ...$ or (venv) ...# - re.compile(r"root@[^:]+:[^#]+# ?$"), # root@container:~# - re.compile(r"[a-zA-Z0-9_.-]+@[^:]+:[^$#]+[$#] ?$"), # user@host:~$ - re.compile(r"bash-\d+\.\d+\$ ?$"), # bash-3.2$ (version can vary) - ] - - # potential dialog detection - dialog_patterns = [ - re.compile(r"Y/N", re.IGNORECASE), # Y/N anywhere in line - re.compile(r"yes/no", re.IGNORECASE), # yes/no anywhere in line - re.compile(r":\s*$"), # line ending with colon - re.compile(r"\?\s*$"), # line ending with question mark - ] + # Override timeouts if a dict is provided + if timeouts: + first_output_timeout = timeouts.get("first_output_timeout", first_output_timeout) + between_output_timeout = timeouts.get("between_output_timeout", between_output_timeout) + dialog_timeout = timeouts.get("dialog_timeout", dialog_timeout) + max_exec_timeout = timeouts.get("max_exec_timeout", max_exec_timeout) start_time = time.time() last_output_time = start_time @@ -229,7 +266,7 @@ async def get_terminal_output( while True: await asyncio.sleep(sleep_time) - full_output, partial_output = await self.state.shells[session].read_output( + full_output, partial_output = await self.state.shells[session].session.read_output( timeout=1, reset_full_output=reset_full_output ) reset_full_output = False # only reset once @@ -241,6 +278,7 @@ async def get_terminal_output( PrintStyle(font_color="#85C1E9").stream(partial_output) # full_output += partial_output # Append new output truncated_output = self.fix_full_output(full_output) + 
self.set_progress(truncated_output) heading = self.get_heading_from_output(truncated_output, 0) self.log.update(content=prefix + truncated_output, heading=heading) last_output_time = now @@ -252,7 +290,7 @@ async def get_terminal_output( ) last_lines.reverse() for idx, line in enumerate(last_lines): - for pat in prompt_patterns: + for pat in self.prompt_patterns: if pat.search(line.strip()): PrintStyle.info( "Detected shell prompt, returning output early." @@ -262,6 +300,7 @@ async def get_terminal_output( "\n".join(last_lines), idx + 1, True ) self.log.update(heading=heading) + self.mark_session_idle(session) return truncated_output # Check for max execution time @@ -308,7 +347,7 @@ async def get_terminal_output( truncated_output.splitlines()[-2:] if truncated_output else [] ) for line in last_lines: - for pat in dialog_patterns: + for pat in self.dialog_patterns: if pat.search(line.strip()): PrintStyle.info( "Detected dialog prompt, returning output early." @@ -331,6 +370,63 @@ async def get_terminal_output( ) return response + async def handle_running_session( + self, + session=0, + reset_full_output=True, + prefix="" + ): + if not self.state or session not in self.state.shells: + return None + if not self.state.shells[session].running: + return None + + full_output, _ = await self.state.shells[session].session.read_output( + timeout=1, reset_full_output=reset_full_output + ) + truncated_output = self.fix_full_output(full_output) + self.set_progress(truncated_output) + heading = self.get_heading_from_output(truncated_output, 0) + + last_lines = ( + truncated_output.splitlines()[-3:] if truncated_output else [] + ) + last_lines.reverse() + for idx, line in enumerate(last_lines): + for pat in self.prompt_patterns: + if pat.search(line.strip()): + PrintStyle.info( + "Detected shell prompt, returning output early." + ) + self.mark_session_idle(session) + return None + + has_dialog = False + for line in last_lines: + for pat in self.dialog_patterns: + if pat.search(line.strip()): + has_dialog = True + break + if has_dialog: + break + + if has_dialog: + sys_info = self.agent.read_prompt("fw.code.pause_dialog.md", timeout=1) + else: + sys_info = self.agent.read_prompt("fw.code.running.md", session=session) + + response = self.agent.read_prompt("fw.code.info.md", info=sys_info) + if truncated_output: + response = truncated_output + "\n\n" + response + PrintStyle(font_color="#FFA500", bold=True).print(response) + self.log.update(content=prefix + response, heading=heading) + return response + + def mark_session_idle(self, session: int = 0): + # Mark session as idle - command finished + if self.state and session in self.state.shells: + self.state.shells[session].running = False + async def reset_terminal(self, session=0, reason: str | None = None): # Print the reason for the reset to the console if provided if reason: @@ -371,6 +467,17 @@ def fix_full_output(self, output: str): # remove any single byte \xXX escapes output = re.sub(r"(? 
tuple[str | None, str | None]: + context = self.agent.context + if not context: + return (None, None) + project_slug = get_context_project_name(context) + if not project_slug: + return (None, None) + try: + metadata = load_basic_project_data(project_slug) + color = metadata.get("color") or None + except Exception: + color = None + return project_slug, color + async def list_tasks(self, **kwargs) -> Response: state_filter: list[str] | None = kwargs.get("state", None) type_filter: list[str] | None = kwargs.get("type", None) @@ -153,13 +168,17 @@ async def create_scheduled_task(self, **kwargs) -> Response: if not re.match(cron_regex, task_schedule.to_crontab()): return Response(message="Invalid cron expression: " + task_schedule.to_crontab(), break_loop=False) + project_slug, project_color = self._resolve_project_metadata() + task = ScheduledTask.create( name=name, system_prompt=system_prompt, prompt=prompt, attachments=attachments, schedule=task_schedule, - context_id=None if dedicated_context else self.agent.context.id + context_id=None if dedicated_context else self.agent.context.id, + project_name=project_slug, + project_color=project_color, ) await TaskScheduler.get().add_task(task) return Response(message=f"Scheduled task '{name}' created: {task.uuid}", break_loop=False) @@ -172,13 +191,17 @@ async def create_adhoc_task(self, **kwargs) -> Response: token: str = str(random.randint(1000000000000000000, 9999999999999999999)) dedicated_context: bool = kwargs.get("dedicated_context", False) + project_slug, project_color = self._resolve_project_metadata() + task = AdHocTask.create( name=name, system_prompt=system_prompt, prompt=prompt, attachments=attachments, token=token, - context_id=None if dedicated_context else self.agent.context.id + context_id=None if dedicated_context else self.agent.context.id, + project_name=project_slug, + project_color=project_color, ) await TaskScheduler.get().add_task(task) return Response(message=f"Adhoc task '{name}' created: {task.uuid}", break_loop=False) @@ -206,6 +229,8 @@ async def create_planned_task(self, **kwargs) -> Response: done=[] ) + project_slug, project_color = self._resolve_project_metadata() + # Create planned task with task plan task = PlannedTask.create( name=name, @@ -213,7 +238,9 @@ async def create_planned_task(self, **kwargs) -> Response: prompt=prompt, attachments=attachments, plan=task_plan, - context_id=None if dedicated_context else self.agent.context.id + context_id=None if dedicated_context else self.agent.context.id, + project_name=project_slug, + project_color=project_color ) await TaskScheduler.get().add_task(task) return Response(message=f"Planned task '{name}' created: {task.uuid}", break_loop=False) @@ -229,7 +256,7 @@ async def wait_for_task(self, **kwargs) -> Response: return Response(message=f"Task not found: {task_uuid}", break_loop=False) if task.context_id == self.agent.context.id: - return Response(message="You can only wait for tasks running in a different chat context (dedicated_context=True).", break_loop=False) + return Response(message="You can only wait for tasks running in their own dedicated context.", break_loop=False) done = False elapsed = 0 diff --git a/python/tools/wait.py b/python/tools/wait.py new file mode 100644 index 0000000000..000573c5f2 --- /dev/null +++ b/python/tools/wait.py @@ -0,0 +1,89 @@ +import asyncio +from datetime import datetime, timedelta, timezone +from python.helpers.tool import Tool, Response +from python.helpers.print_style import PrintStyle +from python.helpers.wait import 
managed_wait +from python.helpers.localization import Localization + +class WaitTool(Tool): + + async def execute(self, **kwargs) -> Response: + await self.agent.handle_intervention() + + seconds = self.args.get("seconds", 0) + minutes = self.args.get("minutes", 0) + hours = self.args.get("hours", 0) + days = self.args.get("days", 0) + until_timestamp_str = self.args.get("until") + + is_duration_wait = not bool(until_timestamp_str) + + now = datetime.now(timezone.utc) + target_time = None + + if until_timestamp_str: + try: + target_time = Localization.get().localtime_str_to_utc_dt(until_timestamp_str) + if not target_time: + raise ValueError(f"Invalid timestamp format: {until_timestamp_str}") + except ValueError as e: + return Response( + message=str(e), + break_loop=False, + ) + else: + wait_duration = timedelta( + days=int(days), + hours=int(hours), + minutes=int(minutes), + seconds=int(seconds), + ) + if wait_duration.total_seconds() <= 0: + return Response( + message="Wait duration must be positive.", + break_loop=False, + ) + target_time = now + wait_duration + + if target_time <= now: + return Response( + message=f"Target time {target_time.isoformat()} is in the past.", + break_loop=False, + ) + + PrintStyle.info(f"Waiting until {target_time.isoformat()}...") + + target_time = await managed_wait( + agent=self.agent, + target_time=target_time, + is_duration_wait=is_duration_wait, + log=self.log, + get_heading_callback=self.get_heading + ) + + if self.log: + self.log.update(heading=self.get_heading("Done", done=True)) + + message = self.agent.read_prompt( + "fw.wait_complete.md", + target_time=target_time.isoformat() + ) + + return Response( + message=message, + break_loop=False, + ) + + def get_log_object(self): + return self.agent.context.log.log( + type="progress", + heading=self.get_heading(), + content="", + kvps=self.args, + ) + + def get_heading(self, text: str = "", done: bool = False): + done_icon = " icon://done_all" if done else "" + if not text: + text = f"Waiting..." 
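+        # build the log heading: timer icon prefix, status text, and a done icon when finished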
+ return f"icon://timer Wait: {text}{done_icon}" diff --git a/requirements.dev.txt b/requirements.dev.txt new file mode 100644 index 0000000000..93bada7955 --- /dev/null +++ b/requirements.dev.txt @@ -0,0 +1,3 @@ +pytest>=8.4.2 +pytest-asyncio>=1.2.0 +pytest-mock>=3.15.1 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 17856f8b76..755cfcf5ec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ fastmcp==2.3.4 fasta2a==0.5.0 flask[async]==3.0.3 flask-basicauth==0.2.0 -flaredantic==0.1.4 +flaredantic==0.1.5 GitPython==3.1.43 inputimeout==1.0.4 kokoro>=0.9.2 @@ -33,8 +33,8 @@ unstructured-client==0.31.0 webcolors==24.6.0 nest-asyncio==1.6.0 crontab==1.0.1 -litellm==1.75.0 markdownify==1.1.0 +pydantic==2.11.7 pymupdf==1.25.3 pytesseract==0.3.13 pdf2image==1.17.0 @@ -42,3 +42,9 @@ crontab==1.0.1 pathspec>=0.12.1 psutil>=7.0.0 soundfile==0.13.1 +imapclient>=3.0.1 +html2text>=2024.2.26 +beautifulsoup4>=4.12.3 +boto3>=1.35.0 +exchangelib>=5.4.3 +pywinpty==3.0.2; sys_platform == "win32" \ No newline at end of file diff --git a/requirements2.txt b/requirements2.txt new file mode 100644 index 0000000000..7256765d89 --- /dev/null +++ b/requirements2.txt @@ -0,0 +1,2 @@ +litellm==1.79.3 +openai==1.99.5 \ No newline at end of file diff --git a/run_ui.py b/run_ui.py index adbf75085d..1691f69e74 100644 --- a/run_ui.py +++ b/run_ui.py @@ -17,6 +17,7 @@ from python.helpers.extract_tools import load_classes_from_folder from python.helpers.api import ApiHandler from python.helpers.print_style import PrintStyle +from python.helpers import login # disable logging import logging @@ -116,24 +117,17 @@ async def decorated(*args, **kwargs): return decorated -def _get_credentials_hash(): - user = dotenv.get_dotenv_value("AUTH_LOGIN") - password = dotenv.get_dotenv_value("AUTH_PASSWORD") - if not user: - return None - return hashlib.sha256(f"{user}:{password}".encode()).hexdigest() - # require authentication for handlers def requires_auth(f): @wraps(f) async def decorated(*args, **kwargs): - user_pass_hash = _get_credentials_hash() + user_pass_hash = login.get_credentials_hash() # If no auth is configured, just proceed if not user_pass_hash: return await f(*args, **kwargs) if session.get('authentication') != user_pass_hash: - return redirect(url_for('login')) + return redirect(url_for('login_handler')) return await f(*args, **kwargs) @@ -153,14 +147,14 @@ async def decorated(*args, **kwargs): return decorated @webapp.route("/login", methods=["GET", "POST"]) -async def login(): +async def login_handler(): error = None if request.method == 'POST': user = dotenv.get_dotenv_value("AUTH_LOGIN") password = dotenv.get_dotenv_value("AUTH_PASSWORD") if request.form['username'] == user and request.form['password'] == password: - session['authentication'] = _get_credentials_hash() + session['authentication'] = login.get_credentials_hash() return redirect(url_for('serve_index')) else: error = 'Invalid Credentials. Please try again.' 
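The credentials hash used for session auth now comes from a shared `login` helper instead of the local `_get_credentials_hash()` removed above. A minimal sketch of that helper, assuming it simply mirrors the removed implementation (the actual `python/helpers/login.py` is not part of this excerpt):

import hashlib
from python.helpers import dotenv  # assumed import path; run_ui.py uses this module the same way

def get_credentials_hash():
    # hash of "user:password" from .env, or None when no AUTH_LOGIN is configured
    user = dotenv.get_dotenv_value("AUTH_LOGIN")
    password = dotenv.get_dotenv_value("AUTH_PASSWORD")
    if not user:
        return None
    return hashlib.sha256(f"{user}:{password}".encode()).hexdigest()

Keeping the hash logic in one helper lets the `requires_auth` decorator and the login POST handler stay in sync.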
@@ -169,9 +163,9 @@ async def login(): return render_template_string(login_page_content, error=error) @webapp.route("/logout") -async def logout(): +async def logout_handler(): session.pop('authentication', None) - return redirect(url_for('login')) + return redirect(url_for('login_handler')) # handle default address, load index @webapp.route("/", methods=["GET"]) diff --git a/tests/chunk_parser_test.py b/tests/chunk_parser_test.py index 9297d2fbf3..fade5db55b 100644 --- a/tests/chunk_parser_test.py +++ b/tests/chunk_parser_test.py @@ -7,6 +7,10 @@ ex2 = "reasoning goes here None: + for entry, value in structure.items(): + rel = os.path.join(base_rel, entry) + if isinstance(value, dict): + create_dir(rel) + materialize_structure(rel, value) + else: + write_file(rel, "" if value is None else str(value)) + + +def ensure_ignore_file(base_rel: str, content: str) -> None: + write_file(os.path.join(base_rel, ".treeignore"), content.strip() + "\n") + + +def print_header(title: str, char: str = "=") -> None: + print(char * 80) + print(title) + print(char * 80) + + +def print_flat(items: List[Dict[str, Any]]) -> None: + print("level type name text") + print("-" * 80) + for item in items: + level = item["level"] + item_type = item["type"] + name = item["name"] + text = item["text"] + print(f"{level:<5} {item_type:<7} {name:<20} {text}") + + +def print_nested(items: List[Dict[str, Any]], root_label: str) -> None: + print(root_label) + + def recurse(nodes: List[Dict[str, Any]], prefix: str) -> None: + total = len(nodes) + for index, node in enumerate(nodes): + is_last = index == total - 1 + connector = "└── " if is_last else "β”œβ”€β”€ " + label = node["name"] + ("/" if node["type"] == "folder" else "") + print(f"{prefix}{connector}{label} [{node['type']}]") + children = node.get("items") or [] + if children: + child_prefix = prefix + (" " if is_last else "β”‚ ") + recurse(children, child_prefix) + + recurse(items, "") + + +@contextmanager +def scenario_directory(name: str) -> Iterable[str]: + rel_path = os.path.join(BASE_TEMP_ROOT, name) + delete_dir(rel_path) + create_dir(rel_path) + try: + yield rel_path + finally: + delete_dir(rel_path) + + +def _set_entry_times(relative_path: str, timestamp: float) -> None: + abs_path = get_abs_path(relative_path) + os.utime(abs_path, (timestamp, timestamp)) + time.sleep(0.01) + + +def _apply_timestamps(base_rel: str, paths: List[str], base_ts: Optional[float] = None) -> None: + if base_ts is None: + base_ts = time.time() + for offset, rel in enumerate(paths, start=1): + _set_entry_times(os.path.join(base_rel, rel), base_ts + offset) + + +def list_scenarios(scenarios: List[Scenario]) -> None: + print("Available scenarios:") + for scenario in scenarios: + print(f" - {scenario.name}: {scenario.description}") + + +def run_scenarios(selected: List[Scenario]) -> None: + create_dir(BASE_TEMP_ROOT) + for scenario in selected: + print_header(f"Scenario: {scenario.name} β€” {scenario.description}") + with scenario_directory(scenario.name) as base_rel: + materialize_structure(base_rel, scenario.structure) + + if scenario.ignore_content: + ensure_ignore_file(base_rel, scenario.ignore_content) + + if scenario.setup: + scenario.setup(base_rel) + + for config in scenario.configs: + print_header(f"Configuration: {config.label}", "-") + params = { + "relative_path": base_rel, + "max_depth": 0, + "max_lines": 0, + "folders_first": True, + "max_folders": None, + "max_files": None, + "sort": (SORT_BY_MODIFIED, SORT_DESC), + **config.params, + } + output_mode = 
params.setdefault("output_mode", OUTPUT_MODE_STRING) + print("Parameters:") + print(f" output_mode : {output_mode}") + print(f" folders_first : {params['folders_first']}") + sort_key, sort_dir = params["sort"] + print(f" sort : key={sort_key}, direction={sort_dir}") + print(f" max_depth : {params['max_depth']}") + print(f" max_lines : {params['max_lines']}") + print(f" max_folders : {params['max_folders']}") + print(f" max_files : {params['max_files']}") + print(f" ignore : {params.get('ignore')}") + print() + result = file_tree(**params) + + if output_mode == OUTPUT_MODE_STRING: + print(result) + elif output_mode == OUTPUT_MODE_FLAT: + print_flat(result) # type: ignore[arg-type] + elif output_mode == OUTPUT_MODE_NESTED: + print_nested(result, f"{scenario.name}/") + else: + print(f"(Unhandled output mode {output_mode!r})") + + print() + + +def build_scenarios() -> List[Scenario]: + scenarios: List[Scenario] = [] + + scenarios.append( + Scenario( + name="basic_breadth_first", + description="Default breadth-first traversal with mixed folders/files", + structure={ + "alpha": {"alpha_file.txt": "alpha", "nested": {"inner.txt": "inner"}}, + "beta": {"beta_file.txt": "beta"}, + "zeta": {}, + "a.txt": "A", + "b.txt": "B", + }, + configs=[ + Config( + "string β€’ folders-first (name asc)", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + Config( + "string β€’ folders-first disabled", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": False, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + Config( + "flat β€’ folders-first", + { + "output_mode": OUTPUT_MODE_FLAT, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + Config( + "nested β€’ folders-first", + { + "output_mode": OUTPUT_MODE_NESTED, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + ], + ) + ) + + def setup_sorting(base_rel: str) -> None: + entries = [ + "folder_alpha", + "folder_beta", + "file_first.txt", + "file_second.txt", + "file_third.txt", + ] + for index, entry in enumerate(entries, start=1): + abs_path = get_abs_path(os.path.join(base_rel, entry)) + timestamp = 200_000_0000 + index + os.utime(abs_path, (timestamp, timestamp)) + + scenarios.append( + Scenario( + name="sorting_variants", + description="Demonstrate sorting by name and timestamp with folders/files", + structure={ + "folder_alpha": {}, + "folder_beta": {}, + "file_first.txt": "", + "file_second.txt": "", + "file_third.txt": "", + }, + configs=[ + Config( + "string β€’ sort by name asc", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + Config( + "string β€’ sort by created desc", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": True, + "sort": (SORT_BY_CREATED, SORT_DESC), + }, + ), + Config( + "flat β€’ sort by modified asc", + { + "output_mode": OUTPUT_MODE_FLAT, + "folders_first": True, + "sort": (SORT_BY_MODIFIED, SORT_ASC), + }, + ), + ], + setup=setup_sorting, + ) + ) + + scenarios.append( + Scenario( + name="ignore_and_limits", + description="Ignore file semantics with max_folders/max_files summaries", + structure={ + "src": { + "main.py": "print('hello')", + "utils.py": "pass", + "tmp.tmp": "", + "cache": {"cached.txt": "", "keep.txt": ""}, + "modules": {"a.py": "", "b.py": "", "c.py": ""}, + "pkg": {"alpha.py": "", "beta.py": "", "gamma.py": ""}, + }, + "logs": {"2024.log": "", "2025.log": ""}, + "notes.md": "", + "guide.md": "", + "todo.md": "", + "build.tmp": "", + 
"archive": {}, + "assets": {}, + "sandbox": {}, + "vendor": {}, + }, + ignore_content="\n".join( + ["*.tmp", "cache/", "!src/cache/keep.txt", "logs/", "!logs/2025.log"] + ), + configs=[ + Config( + "string β€’ folders-first with summaries", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": False, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 1, + "max_files": 2, + "max_lines": 12, + "ignore": "file:.treeignore", + }, + ), + Config( + "nested β€’ inspect truncated branches & comments", + { + "output_mode": OUTPUT_MODE_NESTED, + "folders_first": False, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 1, + "max_files": 2, + "max_lines": 12, + "ignore": "file:.treeignore", + }, + ), + ], + ) + ) + + scenarios.append( + Scenario( + name="limits_exact_match", + description="Per-directory limits exactly met (no summary comments)", + structure={ + "pkg": { + "a.py": "", + "b.py": "", + "dir1": {}, + "dir2": {}, + } + }, + configs=[ + Config( + "string β€’ exact matches (no summaries)", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 2, + "max_files": 2, + }, + ), + Config( + "flat β€’ exact matches (no summaries)", + { + "output_mode": OUTPUT_MODE_FLAT, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 2, + "max_files": 2, + }, + ), + ], + ) + ) + + scenarios.append( + Scenario( + name="single_overflow", + description="Single overflow entries promoted instead of summary comment", + structure={ + "pkg": { + "dir_a": {}, + "dir_b": {}, + "file_a.txt": "", + } + }, + configs=[ + Config( + "string β€’ single folder overflow", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 1, + }, + ), + Config( + "string β€’ single file overflow", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": False, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_files": 1, + }, + ), + Config( + "flat β€’ folders-first", + { + "output_mode": OUTPUT_MODE_FLAT, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 1, + }, + ), + ], + ) + ) + + scenarios.append( + Scenario( + name="global_max_lines", + description="Global max_lines finishing current depth before truncation", + structure={ + "layer1_a": { + "layer2_a": { + "layer3_a": { + "layer4_a": {"layer5_a.txt": ""}, + } + } + }, + "layer1_b": { + "layer2_b": { + "layer3_b": { + "layer4_b": {"layer5_b.txt": ""}, + } + } + }, + "root_file.txt": "", + }, + configs=[ + Config( + "string β€’ max_lines=6", + { + "output_mode": OUTPUT_MODE_STRING, + "max_lines": 6, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + Config( + "nested β€’ max_lines=6", + { + "output_mode": OUTPUT_MODE_NESTED, + "max_lines": 6, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + ], + ) + ) + + scenarios.append( + Scenario( + name="flat_files_first_limits", + description="Flat output with files-first ordering and per-directory summaries", + structure={ + "dir1": {}, + "dir2": {}, + "dir3": {}, + "dir4": {}, + "a.txt": "", + "b.txt": "", + "c.txt": "", + }, + configs=[ + Config( + "flat β€’ files-first with limits", + { + "output_mode": OUTPUT_MODE_FLAT, + "folders_first": False, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 1, + "max_files": 1, + }, + ) + ], + ) + ) + + scenarios.append( + Scenario( + name="flat_sort_created_max_lines", + description="Flat output sorted by created time with global max_lines", + structure={ + "dirA": {"inner.txt": ""}, + 
"file1.txt": "", + "file2.txt": "", + "file3.txt": "", + }, + setup=lambda base_rel: _apply_timestamps( + base_rel, + [ + "dirA", + os.path.join("dirA", "inner.txt"), + "file1.txt", + "file2.txt", + "file3.txt", + ], + base_ts=2_000_001_000, + ), + configs=[ + Config( + "flat β€’ sort by created desc, max_lines=4", + { + "output_mode": OUTPUT_MODE_FLAT, + "folders_first": True, + "sort": (SORT_BY_CREATED, SORT_DESC), + "max_lines": 4, + }, + ) + ], + ) + ) + + scenarios.append( + Scenario( + name="nested_files_first_limits", + description="Nested output with files-first ordering and per-directory summaries", + structure={ + "dir": {"a.py": "", "b.py": "", "c.py": ""}, + "folder_a": {"inner.txt": ""}, + "folder_b": {}, + "folder_c": {}, + }, + configs=[ + Config( + "nested β€’ files-first with limits", + { + "output_mode": OUTPUT_MODE_NESTED, + "folders_first": False, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 1, + "max_files": 1, + }, + ) + ], + ) + ) + + scenarios.append( + Scenario( + name="nested_max_depth_sort", + description="Nested output with created-time ordering and depth pruning", + structure={ + "root": { + "branch": { + "leaf_a.txt": "", + "leaf_b.txt": "", + } + }, + "alpha.txt": "", + }, + setup=lambda base_rel: _apply_timestamps( + base_rel, + [ + "root", + os.path.join("root", "branch"), + os.path.join("root", "branch", "leaf_a.txt"), + os.path.join("root", "branch", "leaf_b.txt"), + "alpha.txt", + ], + base_ts=2_000_010_000, + ), + configs=[ + Config( + "nested β€’ sort by created asc, max_depth=2", + { + "output_mode": OUTPUT_MODE_NESTED, + "folders_first": True, + "sort": (SORT_BY_CREATED, SORT_ASC), + "max_depth": 2, + }, + ) + ], + ) + ) + + scenarios.append( + Scenario( + name="string_additional_limits", + description="String output exercising files-first+max_lines and zero-limit semantics", + structure={ + "dir": {"inner_a.txt": "", "inner_b.txt": ""}, + "alpha.txt": "", + "beta.txt": "", + "gamma.txt": "", + }, + setup=lambda base_rel: _apply_timestamps( + base_rel, + [ + "dir", + os.path.join("dir", "inner_a.txt"), + os.path.join("dir", "inner_b.txt"), + "alpha.txt", + "beta.txt", + "gamma.txt", + ], + base_ts=2_000_020_000, + ), + configs=[ + Config( + "string β€’ files-first, sort=modified desc, max_lines=4", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": False, + "sort": (SORT_BY_MODIFIED, SORT_DESC), + "max_lines": 4, + }, + ), + Config( + "string β€’ zero file limit acts unlimited", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 2, + "max_files": 0, + }, + ), + ], + ) + ) + + stress_structure = { + "level1_a": { + "level2_a1": { + "leaf_a1_1.txt": "", + "leaf_a1_2.txt": "", + "leaf_a1_3.txt": "", + }, + "level2_a2": { + "leaf_a2_1.txt": "", + "leaf_a2_2.txt": "", + "leaf_a2_3.txt": "", + }, + "level2_a3": { + "subfolder_a3": { + "deep_a3_1.txt": "", + "deep_a3_2.txt": "", + "deep_a3_3.txt": "", + "subsubfolder_a3": { + "deep_a3_4.txt": "", + "deep_a3_5.txt": "", + }, + "subsubfolder_a3_extra": { + "deep_a3_extra_1.txt": "", + "deep_a3_extra_2.txt": "", + }, + }, + "subfolder_a3_extra": { + "deep_extra_1.txt": "", + "deep_extra_2.txt": "", + }, + "subfolder_a3_more": { + "deep_more_1.txt": "", + }, + }, + }, + "level1_b": { + "level2_b1": { + "leaf_b1_1.txt": "", + "leaf_b1_2.txt": "", + }, + "level2_b2": { + "leaf_b2_1.txt": "", + "leaf_b2_2.txt": "", + "leaf_b2_3.txt": "", + "leaf_b2_4.txt": "", + "leaf_b2_5.txt": "", + }, + "level2_b3": { + "subfolder_b3": { + 
"deep_b3_1.txt": "", + "deep_b3_2.txt": "", + "deep_b3_3.txt": "", + "deep_b3_4.txt": "", + }, + "subfolder_b3_extra": { + "deeper_b3_extra.txt": "", + "deeper_b3_extra_2.txt": "", + }, + }, + }, + "level1_c": { + "level2_c1": { + "leaf_c1_1.txt": "", + "leaf_c1_2.txt": "", + "leaf_c1_3.txt": "", + "leaf_c1_4.txt": "", + "leaf_c1_5.txt": "", + }, + "level2_c2": { + "subfolder_c2": { + "deep_c2_1.txt": "", + "deep_c2_2.txt": "", + }, + "subfolder_c2_extra": { + "deep_c2_extra_1.txt": "", + }, + }, + }, + "level1_d": { + "level2_d1": { + "leaf_d1_1.txt": "", + "leaf_d1_2.txt": "", + "leaf_d1_3.txt": "", + }, + "level2_d2": { + "subfolder_d2": { + "deep_d2_1.txt": "", + "deep_d2_2.txt": "", + }, + }, + }, + "root_file.txt": "", + "root_notes.md": "", + "root_file_2.txt": "", + "root_file_3.txt": "", + } + + scenarios.append( + Scenario( + name="mixed_limits_baseline", + description="Full structure without truncation for comparison", + structure=stress_structure, + configs=[ + Config( + "string β€’ no limits baseline", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + Config( + "flat β€’ no limits baseline", + { + "output_mode": OUTPUT_MODE_FLAT, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + Config( + "nested β€’ no limits baseline", + { + "output_mode": OUTPUT_MODE_NESTED, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + }, + ), + ], + ) + ) + + scenarios.append( + Scenario( + name="mixed_limits_stress", + description="Same structure with local and global limits applied", + structure=stress_structure, + configs=[ + Config( + "string β€’ mixed local/global limits stress", + { + "output_mode": OUTPUT_MODE_STRING, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 2, + "max_files": 2, + "max_lines": 19, + }, + ), + Config( + "flat β€’ mixed limits stress", + { + "output_mode": OUTPUT_MODE_FLAT, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 2, + "max_files": 2, + "max_lines": 19, + }, + ), + Config( + "nested β€’ mixed limits stress", + { + "output_mode": OUTPUT_MODE_NESTED, + "folders_first": True, + "sort": (SORT_BY_NAME, SORT_ASC), + "max_folders": 2, + "max_files": 2, + "max_lines": 19, + }, + ), + ], + ) + ) + + return scenarios + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Visualize file_tree() outputs across configurations." + ) + parser.add_argument( + "--scenario", + action="append", + dest="scenarios", + help="Scenario name to run (repeat for multiple). 
Default: run all.", + ) + parser.add_argument( + "--list", + action="store_true", + help="List available scenarios and exit.", + ) + return parser.parse_args() + + +def main() -> None: + scenarios = build_scenarios() + args = parse_args() + + if args.list: + list_scenarios(scenarios) + return + + if args.scenarios: + name_map = {scenario.name: scenario for scenario in scenarios} + unknown = [name for name in args.scenarios if name not in name_map] + if unknown: + raise SystemExit(f"Unknown scenario(s): {', '.join(unknown)}") + selected = [name_map[name] for name in args.scenarios] + else: + selected = scenarios + + run_scenarios(selected) + + +if __name__ == "__main__": + main() diff --git a/usr/.gitkeep b/usr/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/webui/components/chat/attachments/attachmentsStore.js b/webui/components/chat/attachments/attachmentsStore.js index bb159d2d1d..7e17b9b554 100644 --- a/webui/components/chat/attachments/attachmentsStore.js +++ b/webui/components/chat/attachments/attachmentsStore.js @@ -1,5 +1,6 @@ import { createStore } from "/js/AlpineStore.js"; import { fetchApi } from "/js/api.js"; +import { store as imageViewerStore } from "../../modals/image-viewer/image-viewer-store.js"; const model = { // State properties @@ -7,13 +8,6 @@ const model = { hasAttachments: false, dragDropOverlayVisible: false, - // Image modal properties - currentImageUrl: null, - currentImageName: null, - imageLoaded: false, - imageError: false, - zoomLevel: 1, - async init() { await this.initialize(); }, @@ -358,7 +352,7 @@ const model = { previewUrl: previewUrl, clickHandler: () => { if (this.isImageFile(filename)) { - this.openImageModal(this.getServerImgUrl(filename), filename); + imageViewerStore.open(this.getServerImgUrl(filename), { name: filename }); } else { this.downloadAttachment(filename); } @@ -380,7 +374,7 @@ const model = { clickHandler: () => { if (attachment.type === "image") { const imageUrl = this.getServerImgUrl(attachment.name); - this.openImageModal(imageUrl, attachment.name); + imageViewerStore.open(imageUrl, { name: attachment.name }); } else { this.downloadAttachment(attachment.name); } @@ -425,50 +419,6 @@ const model = { ); }, - // Image modal methods - openImageModal(imageUrl, imageName) { - this.currentImageUrl = imageUrl; - this.currentImageName = imageName; - this.imageLoaded = false; - this.imageError = false; - this.zoomLevel = 1; - - // Open the modal using the modals system - if (window.openModal) { - window.openModal("chat/attachments/imageModal.html"); - } - }, - - closeImageModal() { - this.currentImageUrl = null; - this.currentImageName = null; - this.imageLoaded = false; - this.imageError = false; - this.zoomLevel = 1; - }, - - // Zoom controls - zoomIn() { - this.zoomLevel = Math.min(this.zoomLevel * 1.2, 5); // Max 5x zoom - this.updateImageZoom(); - }, - - zoomOut() { - this.zoomLevel = Math.max(this.zoomLevel / 1.2, 0.1); // Min 0.1x zoom - this.updateImageZoom(); - }, - - resetZoom() { - this.zoomLevel = 1; - this.updateImageZoom(); - }, - - updateImageZoom() { - const img = document.querySelector(".modal-image"); - if (img) { - img.style.transform = `scale(${this.zoomLevel})`; - } - }, }; const store = createStore("chatAttachments", model); diff --git a/webui/components/chat/attachments/inputPreview.html b/webui/components/chat/attachments/inputPreview.html index 0ed55365ae..5effd1f8da 100644 --- a/webui/components/chat/attachments/inputPreview.html +++ b/webui/components/chat/attachments/inputPreview.html @@ 
-1,5 +1,6 @@
@@ -11,7 +12,7 @@ :class="{'image-type': attachment.type === 'image', 'file-type': attachment.type === 'file'}"> +
+ + + + diff --git a/webui/components/sidebar/tasks/tasks-store.js b/webui/components/sidebar/tasks/tasks-store.js new file mode 100644 index 0000000000..eb44a67de1 --- /dev/null +++ b/webui/components/sidebar/tasks/tasks-store.js @@ -0,0 +1,73 @@ +import { createStore } from "/js/AlpineStore.js"; +import { store as chatsStore } from "/components/sidebar/chats/chats-store.js"; +import { store as schedulerStore } from "/components/modals/scheduler/scheduler-store.js"; + +// Tasks sidebar store: tasks list and selected task id +const model = { + tasks: [], + selected: "", + + init() { + // No-op: data is driven by poll() in index.js; this store provides a stable target + }, + + // Apply tasks coming from poll() and keep them sorted (newest first) + applyTasks(tasksList) { + try { + const tasks = Array.isArray(tasksList) ? tasksList : []; + const sorted = [...tasks].sort((a, b) => (b?.created_at || 0) - (a?.created_at || 0)); + this.tasks = sorted; + + // After updating tasks, ensure selection is still valid + if (this.selected && !this.contains(this.selected)) { + this.setSelected(""); + } + } catch (e) { + console.error("tasks-store.applyTasks failed", e); + this.tasks = []; + } + }, + + // Update selected task and persist for tab restore + setSelected(taskId) { + this.selected = taskId || ""; + try { localStorage.setItem("lastSelectedTask", this.selected); } catch {} + }, + + // Returns true if a task with the given id exists in the current list + contains(taskId) { + return Array.isArray(this.tasks) && this.tasks.some((t) => t?.id === taskId); + }, + + // Convenience: id of the first task in the current list (or empty string) + firstId() { + return (Array.isArray(this.tasks) && this.tasks[0]?.id) || ""; + }, + + // Action methods for task management + selectTask(taskId) { + this.setSelected(taskId); + chatsStore.selectChat(taskId); + }, + + openDetail(taskId) { + // Open lightweight task detail popup directly + if (schedulerStore?.showTaskDetail) { + schedulerStore.showTaskDetail(taskId); + } + }, + + reset(taskId) { + chatsStore.resetChat(taskId); + }, + + deleteTask(taskId) { + if (schedulerStore?.deleteTaskFromSidebar) { + schedulerStore.deleteTaskFromSidebar(taskId); + } + }, +}; + +export const store = createStore("tasks", model); + + diff --git a/webui/components/sidebar/top-section/header-icons.html b/webui/components/sidebar/top-section/header-icons.html new file mode 100644 index 0000000000..0e79bc4ecb --- /dev/null +++ b/webui/components/sidebar/top-section/header-icons.html @@ -0,0 +1,76 @@ + + + + + +
+ + + +
+ + a0 + +
+
+ + + + + + + diff --git a/webui/components/sidebar/top-section/quick-actions.html b/webui/components/sidebar/top-section/quick-actions.html new file mode 100644 index 0000000000..30927ee081 --- /dev/null +++ b/webui/components/sidebar/top-section/quick-actions.html @@ -0,0 +1,249 @@ + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + +
+ + + + + diff --git a/webui/components/sidebar/top-section/sidebar-top.html b/webui/components/sidebar/top-section/sidebar-top.html new file mode 100644 index 0000000000..5ba8b3180b --- /dev/null +++ b/webui/components/sidebar/top-section/sidebar-top.html @@ -0,0 +1,31 @@ + + + + + +
+ +
+ + + + + + diff --git a/webui/components/tooltips/tooltip-store.js b/webui/components/tooltips/tooltip-store.js new file mode 100644 index 0000000000..8fb582dd1f --- /dev/null +++ b/webui/components/tooltips/tooltip-store.js @@ -0,0 +1,105 @@ +import { createStore } from "/js/AlpineStore.js"; + +let bootstrapTooltipObserver = null; + +function ensureBootstrapTooltip(element) { + if (!element || !(element instanceof Element)) return; + + const bs = globalThis.bootstrap; + if (!bs?.Tooltip) return; + + const existing = bs.Tooltip.getInstance(element); + const title = element.getAttribute("title") || element.getAttribute("data-bs-original-title"); + + if (!title) return; + + if (existing) { + if (element.getAttribute("title")) { + element.setAttribute("data-bs-original-title", title); + element.removeAttribute("title"); + } + existing.setContent({ ".tooltip-inner": title }); + return; + } + + if (element.getAttribute("title")) { + element.setAttribute("data-bs-original-title", title); + element.removeAttribute("title"); + } + + element.setAttribute("data-bs-toggle", "tooltip"); + element.setAttribute("data-bs-trigger", "hover"); + element.setAttribute("data-bs-tooltip-initialized", "true"); + new bs.Tooltip(element, { + delay: { show: 0, hide: 0 }, + trigger: "hover", + }); +} + +function initBootstrapTooltips(root = document) { + if (!globalThis.bootstrap?.Tooltip) return; + const tooltipTargets = root.querySelectorAll( + "[title]:not([data-bs-tooltip-initialized]), [data-bs-original-title]:not([data-bs-tooltip-initialized])" + ); + tooltipTargets.forEach((element) => ensureBootstrapTooltip(element)); +} + +function observeBootstrapTooltips() { + if (!globalThis.bootstrap?.Tooltip) return; + + // Prevent multiple observers + if (bootstrapTooltipObserver) return; + + bootstrapTooltipObserver = new MutationObserver((mutations) => { + mutations.forEach((mutation) => { + if (mutation.type === "attributes" && mutation.attributeName === "title") { + ensureBootstrapTooltip(mutation.target); + return; + } + + if (mutation.type === "childList") { + // Check removed nodes for tooltip cleanup + mutation.removedNodes.forEach((node) => { + if (!(node instanceof Element)) return; + const tooltipElements = node.matches?.('[data-bs-tooltip-initialized]') ? [node] : Array.from(node.querySelectorAll?.('[data-bs-tooltip-initialized]') || []); + tooltipElements.forEach((el) => { + const instance = globalThis.bootstrap?.Tooltip?.getInstance(el); + if (instance) { + instance.dispose(); + } + }); + }); + + mutation.addedNodes.forEach((node) => { + if (!(node instanceof Element)) return; + if (node.matches("[title]") || node.querySelector("[title]")) { + initBootstrapTooltips(node); + } + }); + } + }); + }); + + bootstrapTooltipObserver.observe(document.body, { + childList: true, + subtree: true, + attributes: true, + attributeFilter: ["title"], + }); +} + +function cleanupTooltipObserver() { + if (bootstrapTooltipObserver) { + bootstrapTooltipObserver.disconnect(); + bootstrapTooltipObserver = null; + } +} + +export const store = createStore("tooltips", { + init() { + initBootstrapTooltips(); + observeBootstrapTooltips(); + }, + + cleanup: cleanupTooltipObserver, +}); diff --git a/webui/components/welcome/welcome-screen.html b/webui/components/welcome/welcome-screen.html new file mode 100644 index 0000000000..a5252eb9e1 --- /dev/null +++ b/webui/components/welcome/welcome-screen.html @@ -0,0 +1,423 @@ + + + + + + + +
+ +
+ + + + + diff --git a/webui/components/welcome/welcome-store.js b/webui/components/welcome/welcome-store.js new file mode 100644 index 0000000000..4da18c9edc --- /dev/null +++ b/webui/components/welcome/welcome-store.js @@ -0,0 +1,204 @@ +import { createStore } from "/js/AlpineStore.js"; +import { getContext } from "/index.js"; +import { store as chatsStore } from "/components/sidebar/chats/chats-store.js"; +import { store as memoryStore } from "/components/modals/memory/memory-dashboard-store.js"; +import { store as projectsStore } from "/components/projects/projects-store.js"; +import * as API from "/js/api.js"; + +const model = { + // State + isVisible: true, + banners: [], + bannersLoading: false, + lastBannerRefresh: 0, + + init() { + // Initialize visibility based on current context + this.updateVisibility(); + + if (this.isVisible) { + this.refreshBanners(); + } + + // Watch for context changes with faster polling for immediate response + setInterval(() => { + this.updateVisibility(); + }, 50); // 50ms for very responsive updates + }, + + // Update visibility based on current context + updateVisibility() { + const hasContext = !!getContext(); + const wasVisible = this.isVisible; + this.isVisible = !hasContext; + + if (this.isVisible && !wasVisible) { + this.refreshBanners(); + } + }, + + // Hide welcome screen + hide() { + this.isVisible = false; + }, + + // Show welcome screen + show() { + this.isVisible = true; + this.refreshBanners(); + }, + + // Build frontend context to send to backend + buildFrontendContext() { + return { + url: window.location.href, + protocol: window.location.protocol, + hostname: window.location.hostname, + port: window.location.port, + browser: navigator.userAgent, + timestamp: new Date().toISOString(), + }; + }, + + // Frontend banner checks (most checks are on backend; add browser-only checks here) + runFrontendBannerChecks() { + return []; + }, + + // Call backend API for additional banners + async runBackendBannerChecks(frontendBanners, frontendContext) { + try { + const response = await API.callJsonApi("/banners", { + banners: frontendBanners, + context: frontendContext, + }); + return response?.banners || []; + } catch (error) { + console.error("Failed to fetch backend banners:", error); + return []; + } + }, + + // Get list of dismissed banner IDs from storage + getDismissedBannerIds() { + const permanent = JSON.parse(localStorage.getItem('dismissed_banners') || '[]'); + const temporary = JSON.parse(sessionStorage.getItem('dismissed_banners') || '[]'); + return new Set([...permanent, ...temporary]); + }, + + // Merge and filter banners: deduplicate by ID, skip dismissed, sort by priority + mergeBanners(frontendBanners, backendBanners) { + const dismissed = this.getDismissedBannerIds(); + const bannerMap = new Map(); + + for (const banner of frontendBanners) { + if (banner.id && !dismissed.has(banner.id)) { + bannerMap.set(banner.id, banner); + } + } + for (const banner of backendBanners) { + if (banner.id && !dismissed.has(banner.id)) { + bannerMap.set(banner.id, banner); + } + } + + return Array.from(bannerMap.values()).sort((a, b) => (b.priority || 0) - (a.priority || 0)); + }, + + // Refresh banners: frontend checks β†’ backend checks β†’ merge + async refreshBanners() { + const now = Date.now(); + if (now - this.lastBannerRefresh < 1000) return; + this.lastBannerRefresh = now; + this.bannersLoading = true; + + try { + const frontendContext = this.buildFrontendContext(); + const frontendBanners = this.runFrontendBannerChecks(); + const 
backendBanners = await this.runBackendBannerChecks(frontendBanners, frontendContext); + this.banners = this.mergeBanners(frontendBanners, backendBanners); + } catch (error) { + console.error("Failed to refresh banners:", error); + this.banners = this.runFrontendBannerChecks(); + } finally { + this.bannersLoading = false; + } + }, + + get sortedBanners() { + return [...this.banners].sort((a, b) => (b.priority || 0) - (a.priority || 0)); + }, + + /** + * Dismiss a banner by ID. + * + * Usage: + * dismissBanner('banner-id') - Temporary dismiss (sessionStorage, cleared on browser close) + * dismissBanner('banner-id', true) - Permanent dismiss (localStorage, persists across sessions) + * + * Dismissed banners are filtered out in mergeBanners() and won't appear until storage is cleared. + * + * @param {string} bannerId - The unique ID of the banner to dismiss + * @param {boolean} permanent - If true, store in localStorage; if false, store in sessionStorage + */ + dismissBanner(bannerId, permanent = false) { + this.banners = this.banners.filter(b => b.id !== bannerId); + + const storage = permanent ? localStorage : sessionStorage; + const dismissed = JSON.parse(storage.getItem('dismissed_banners') || '[]'); + if (!dismissed.includes(bannerId)) { + dismissed.push(bannerId); + storage.setItem('dismissed_banners', JSON.stringify(dismissed)); + } + }, + + getBannerClass(type) { + const classes = { + info: 'banner-info', + warning: 'banner-warning', + error: 'banner-error', + }; + return classes[type] || 'banner-info'; + }, + + getBannerIcon(type) { + const icons = { + info: 'info', + warning: 'warning', + error: 'error', + }; + return icons[type] || 'info'; + }, + + // Execute an action by ID + executeAction(actionId) { + switch (actionId) { + case "new-chat": + chatsStore.newChat(); + break; + case "settings": + // Open settings modal + const settingsButton = document.getElementById("settings"); + if (settingsButton) { + settingsButton.click(); + } + break; + case "projects": + projectsStore.openProjectsModal(); + break; + case "memory": + memoryStore.openModal(); + break; + case "website": + window.open("https://agent-zero.ai", "_blank"); + break; + case "github": + window.open("https://github.com/agent0ai/agent-zero", "_blank"); + break; + } + }, +}; + +// Create and export the store +const store = createStore("welcomeStore", model); +export { store }; diff --git a/webui/css/buttons.css b/webui/css/buttons.css new file mode 100644 index 0000000000..adfee2694b --- /dev/null +++ b/webui/css/buttons.css @@ -0,0 +1,227 @@ +/* Button Styles */ +.button { + background: var(--color-panel); + font-weight: 500; + padding: 0.5rem 1.5rem; + border-radius: 6px; + cursor: pointer; + border: 1px solid var(--color-border); + color: var(--color-text); + font-size: 0.875rem; + font-family: "Rubik", Arial, Helvetica, sans-serif; + transition: all 0.18s cubic-bezier(0.4, 0, 0.2, 1); + min-height: 2em; /* Standard height */ + display: inline-flex; + align-items: center; + justify-content: center; + box-sizing: border-box; +} + +.button.confirm { + background: var(--color-highlight); + color: #fff; +} + +.button.cancel { + background: var(--color-panel); + color: var(--color-accent); + /* border: 1px solid var(--color-accent); */ +} + +.button:hover { + transform: scale(1.05); + filter: brightness(1.05); +} + +.button.cancel:hover { + background: var(--color-panel); + border-color: var(--color-accent); + color: var(--color-accent); +} + +.buttons-container { + display: flex; + justify-content: space-between; + 
align-items: center; +} + +.buttons-left { + display: flex; + justify-content: flex-start; + gap: 0.5em; +} + +.buttons-right { + display: flex; + justify-content: flex-end; + gap: 0.5em; +} + +.icon-button { + padding-left: 0.75rem; + padding-right: 0.75rem; +} + +/* Action Header Buttons - for modal headers with confirm/cancel/edit/delete actions */ +.btn-action-header { + padding: 0.4rem 0.6rem; + display: flex; + align-items: center; + gap: 0.25rem; + background: var(--color-message-bg); + color: var(--color-text); + border: 1px solid var(--color-border); + border-radius: 4px; + cursor: pointer; + transition: all 0.15s ease; +} + +.btn-action-header .material-symbols-outlined { + font-size: 18px; +} + +.btn-action-header:hover { + border-color: var(--color-primary); + color: var(--color-primary); +} + +.btn-action-header.confirm:hover { + border-color: #4CAF50; + color: #4CAF50; + background: var(--color-background); +} + +.btn-action-header.cancel:hover { + border-color: var(--color-accent); + color: var(--color-accent); + background: var(--color-background); +} + +.btn-action-header.delete:hover { + border-color: var(--color-accent); + color: var(--color-accent); +} + +.btn-action-header.edit:hover, +.btn-action-header.copy:hover, +.btn-action-header.copy-all:hover, +.btn-action-header.copy-content:hover { + border-color: var(--color-primary); + color: var(--color-primary); +} + +/* Table/Row Action Buttons - for inline row actions (run, edit, delete, etc.) */ +.btn-action { + background: none; + border: 1px solid var(--color-border); + padding: 0.25rem; + color: var(--color-text); + opacity: 0.7; + display: inline-flex; + align-items: center; + justify-content: center; + border-radius: 4px; + cursor: pointer; + transition: all 0.15s ease; +} + +.btn-action .material-symbols-outlined { + font-size: 18px; +} + +.btn-action:hover { + opacity: 1; + background: var(--color-panel); + border-color: var(--color-primary); + color: var(--color-primary); + transform: translateY(-1px); +} + +.btn-action.delete:hover { + background: var(--color-panel); + border-color: var(--color-accent); + color: var(--color-accent); +} + +/* Icon Button - minimal padding for icon-only buttons */ +.btn-icon { + background: transparent; + border: 1px solid var(--color-border); + color: var(--color-text); + padding: 0.25rem; + display: flex; + align-items: center; + border-radius: 4px; + cursor: pointer; + transition: all 0.15s ease; +} + +.btn-icon:hover:not(:disabled) { + background: var(--color-panel); + border-color: var(--color-primary); + color: var(--color-text); +} + +.btn-icon:disabled { + opacity: 0.4; + cursor: not-allowed; +} + +.btn-icon .material-symbols-outlined { + font-size: 20px; +} + +/* Actions wrapper for grouping action buttons */ +.actions-wrapper { + display: flex; + align-items: center; + gap: 0.25rem; +} + +/* Inline Confirmation State */ +.confirming { + border-color: var(--color-highlight) !important; + background-color: var(--color-highlight) !important; + color: #fff !important; +} + +.confirming:hover { + filter: brightness(1.1); +} + +/* Standard icon-only action button for destructive actions */ +.btn-icon-action { + display: inline-flex; + align-items: center; + justify-content: center; + background: transparent; + border: 1px solid var(--color-border); + border-radius: 0.25rem; + color: var(--color-primary); + cursor: pointer; + padding: 0.25rem; + width: 1.75rem; + height: 1.75rem; + transition: all 0.18s cubic-bezier(0.4, 0, 0.2, 1); + flex-shrink: 0; +} + +.btn-icon-action 
.material-symbols-outlined, +.btn-icon-action .material-icons-outlined { + font-size: 1rem; +} + +.btn-icon-action:hover { + border-color: var(--color-primary); + background-color: var(--color-background-hover); +} + +.btn-icon-action:active { + background-color: var(--color-background-hover); + color: var(--color-primary); +} + +.btn-icon-action.danger:hover { + border-color: #e57373; + color: #e57373; +} diff --git a/webui/css/file_browser.css b/webui/css/file_browser.css deleted file mode 100644 index 4ed42877af..0000000000 --- a/webui/css/file_browser.css +++ /dev/null @@ -1,249 +0,0 @@ -/* File Browser Styles */ - -.files-list, -.file-header, -.file-item { - width: 100%; - border-radius: 4px; - overflow: hidden; -} - -/* Header Styles */ -.file-header { - display: grid; - grid-template-columns: 2fr 0.6fr 1fr 80px; - background: var(--secondary-bg); - padding: 8px 0; - font-weight: bold; - border-bottom: 1px solid var(--border-color); - color: var(--color-primary); -} - -.file-cell, -.file-cell-size, -.file-cell-date { - color: var(--color-primary); - padding: 4px; - cursor: pointer; -} - -/* File Item Styles */ -.file-item { - display: grid; - grid-template-columns: 2fr 0.6fr 1fr 80px; - align-items: center; - padding: 8px 0; - font-size: 0.875rem; - border-top: 1px solid var(--color-border); - transition: background-color 0.2s; - white-space: nowrap; - overflow: hidden; - color: var(--color-text); -} - -.file-item:hover { - background-color: var(--color-secondary); -} - -/* File Icon and Name */ -.file-icon { - width: 1.8rem; - height: 1.8rem; - margin: 0 1rem 0 0.7rem; - vertical-align: middle; - font-size: var(--font-size-sm); -} - -.file-name { - display: flex; - align-items: center; - font-weight: 500; - margin-right: var(--spacing-sm); - overflow: hidden; -} - -.file-name > span { - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; -} - -.file-size, -.file-date { - color: var(--text-secondary); -} - -/* No Files Message */ -.no-files { - padding: 32px; - text-align: center; - color: var(--text-secondary); -} - -/* Light Mode Adjustments */ -.light-mode .file-item:hover { - background-color: var(--color-secondary-light); -} - -/* Path Navigator Styles */ -.path-navigator { - display: flex; - align-items: center; - gap: 24px; - background-color: var(--color-message-bg); - padding: 0.5rem var(--spacing-sm); - margin-bottom: 0.3rem; - border: 1px solid var(--color-border); - border-radius: 8px; -} - -.nav-button { - padding: 4px 12px; - border: 1px solid var(--color-border); - border-radius: 4px; - background: var(--color-background); - color: var(--color-text); - cursor: pointer; - transition: background-color 0.2s; -} - -.nav-button:hover { - background: var(--hover-bg); -} - -.nav-button.back-button { - background-color: var(--color-secondary); - color: var(--color-text); -} - -.nav-button.back-button:hover { - background-color: var(--color-secondary-dark); -} - -#current-path { - opacity: 0.9; -} - -#path-text { - font-family: 'Roboto Mono', monospace; - font-optical-sizing: auto; - -webkit-font-optical-sizing: auto; - opacity: 0.9; -} - -/* Folder Specific Styles */ -.file-item[data-is-dir="true"] { - cursor: pointer; -} - -.file-item[data-is-dir="true"]:hover { - background-color: var(--color-secondary); -} - -/* Upload Button Styles */ -.upload-button, -.btn-upload { - display: inline-flex; - align-items: center; - padding: 8px 16px; - background-color: var(--color-primary); - color: white; - border-radius: 4px; - cursor: pointer; - transition: 
background-color 0.3s ease-in-out; -} - -.btn-upload { - background: #4248f1; - gap: 0.5rem; - margin: 0 auto; -} - -.btn-upload > svg { - width: 20px; -} - -.upload-button:hover, -.btn-upload:hover { - background-color: #353bc5; -} - -.upload-button:active, -.btn-upload:active { - background-color: #2b309c; -} - -/* Delete Button Styles */ -.delete-button { - background: none; - border: none; - color: var(--color-primary); - cursor: pointer; - width: 32px; - padding: 4px 8px; - border-radius: 4px; - transition: opacity 0.2s, background-color 0.2s; -} - -.delete-button:hover { - color: #ff7878; -} - -.delete-button:active { - opacity: 0.6; -} - -/* File Actions */ -.file-actions { - display: flex; - gap: var(--spacing-xs); -} - -.action-button { - background: none; - border: none; - cursor: pointer; - width: 32px; - padding: 6px 8px; - border-radius: 4px; - transition: background-color 0.2s; -} - -.download-button { - color: var(--color-primary); -} - -.download-button:hover { - background-color: var(--color-border); -} - -.light-mode .download-button:hover { - background-color: #c6d4de; -} - -/* Responsive Design */ -@media (max-width: 768px) { - .file-header, - .file-item { - grid-template-columns: 1fr 0.5fr 80px; - } - - .file-cell-date, - .file-date { - display: none; - } -} - -@media (max-width: 540px) { - .file-header, - .file-item { - grid-template-columns: 1fr 80px; - } - - .file-cell-size, - .file-size, - .file-cell-date, - .file-date { - display: none; - } -} diff --git a/webui/css/history.css b/webui/css/history.css deleted file mode 100644 index 6608c97ae1..0000000000 --- a/webui/css/history.css +++ /dev/null @@ -1,25 +0,0 @@ -/* History Styles */ - -/* ACE Editor Scrollbar */ -.ace_scrollbar-v { - overflow-y: auto; - } - - /* JSON Viewer Container */ - #json-viewer-container { - width: 100%; - height: 71vh; - border-radius: 0.4rem; - overflow: auto; - } - - #json-viewer-container::-webkit-scrollbar { - width: 0; - } - - /* Viewer Styles */ - .history-viewer { - overflow: hidden; - margin-bottom: 0.5rem; - } - \ No newline at end of file diff --git a/webui/css/messages.css b/webui/css/messages.css index 2ee0dd3e6c..5df94cf246 100644 --- a/webui/css/messages.css +++ b/webui/css/messages.css @@ -1,5 +1,6 @@ /* Chat History */ #chat-history { + background-color: var(--color-chat-background); position: relative; display: -webkit-flex; display: flex; @@ -9,11 +10,14 @@ overflow-y: scroll; overflow-x: hidden; scroll-behavior: auto !important; /* avoid infinite scrolling! 
*/ - padding: var(--spacing-md) var(--spacing-sm) 0; + /* padding: var(--spacing-md) 10em 0 !important; */ -webkit-transition: all 0.3s ease; transition: all 0.3s ease; scrollbar-width: thin; scrollbar-color: #555 transparent; + padding-left: var(--spacing-sm); + padding-right: var(--spacing-sm); + padding-bottom:5em; } #chat-history > *:first-child { @@ -49,13 +53,11 @@ /* Message Styles */ .user-container { - align-self: flex-end; - /* margin: var(--spacing-sm) var(--spacing-md); */ - /* margin-bottom: var(--spacing-lg); */ - /* margin-top: var(--spacing-sm); */ display: flex; justify-content: flex-end; + position: relative; width: 100%; + padding-left: 20%; } .ai-container { @@ -78,14 +80,14 @@ } .message-user { - background-color: #4a4a4a; - /* border-bottom-right-radius: var(--spacing-xxs); */ - /* min-width: 195px; */ text-align: end; + margin-bottom: var(--spacing-md); + border: 1px solid var(--color-border); + border-radius: var(--border-radius); } .message-user > div { - padding-top: var(--spacing-xs); + /* padding-top: var(--spacing-xs); */ /* font-family: "Roboto Mono", monospace; */ font-optical-sizing: auto; -webkit-font-optical-sizing: auto; @@ -99,8 +101,16 @@ .message-user .message-text pre { font-family: var(--font-family-main); + font-weight: 200; + font-size: larger; + /* color: azure; */ + color: var(--color-text); } +/* .light-mode .message-user .message-text pre { + color: #2e2e2e; +} */ + .message-ai { /* border-bottom-left-radius: var(--spacing-xxs); */ } @@ -116,7 +126,7 @@ } .message-followup .message { - border-radius: 1.125em; /* 18px */ + border-radius: 0; /* border-top-left-radius: var(--spacing-xxs); */ } @@ -134,34 +144,111 @@ .message-browser, .message-info, .message-util, -.message-warning, -.message-error { +.message-warning { color: #e0e0e0; + background-color: transparent; + border-radius: 0; + box-shadow: none; } .message-default { - background-color: #1a242f; + background-color: transparent; } .message-agent { - background-color: #34506b; + background-color: transparent; } .message-agent-response { min-width: 255px; - background-color: #1f3c1e; + background-color: transparent; + opacity: 0.85; } .message-agent-delegation { - background-color: #12685e; + background-color: transparent; } .message-tool { - background-color: #2a4170; + background-color: transparent; } .message-code-exe { - background-color: #4b3a69; + background-color: transparent; +} + +/* Terminal-style code execution block */ +.message-code-exe .message-body { + background: #0d1117; + border: 1px solid rgba(48, 54, 61, 0.8); + border-radius: 6px; + padding: 0; + margin-top: 0.5em; + overflow: hidden; +} + +.message-code-exe .msg-heading { + background: linear-gradient(180deg, #21262d 0%, #161b22 100%); + padding: 6px 12px; + border-bottom: 1px solid rgba(48, 54, 61, 0.8); + display: flex; + align-items: center; + gap: 8px; +} + +.message-code-exe .msg-heading h4 { + font-family: var(--font-family-code); + font-size: 0.75rem; + color: #8b949e; + margin: 0; + display: flex; + align-items: center; + gap: 6px; +} + +.message-code-exe .msg-heading h4::before { + content: "$"; + color: #7ee787; + font-weight: 600; +} + +.message-code-exe .msg-content { + padding: 12px; + font-family: var(--font-family-code); + font-size: 0.72rem; + color: #c9d1d9; + line-height: 1.5; + max-height: 300px; + overflow-y: auto; +} + +.message-code-exe .msg-content pre { + margin: 0; + white-space: pre-wrap; + word-break: break-word; +} + +/* Light mode terminal */ +.light-mode .message-code-exe .message-body { + 
background: #f6f8fa; + border-color: #d0d7de; +} + +.light-mode .message-code-exe .msg-heading { + background: linear-gradient(180deg, #f6f8fa 0%, #eaeef2 100%); + border-bottom-color: #d0d7de; +} + +.light-mode .message-code-exe .msg-heading h4 { + color: #57606a; +} + +.light-mode .message-code-exe .msg-heading h4::before { + color: #1a7f37; +} + +.light-mode .message-code-exe .msg-content { + color: #24292f; } .message-body .message-markdown-table-wrap { @@ -181,40 +268,33 @@ white-space: break-spaces; } -.light-mode .message-code-exe .message-body { - border: 1px solid var(--color-border); -} +/* Light mode code-exe styling moved to main terminal block above */ .message-browser { - background-color: #4b3a69; + background-color: transparent; } .message-info { - background-color: var(--color-panel); + background-color: transparent; } .message-util { - background-color: #23211a; + background-color: transparent; display: none; } .message-warning { - background-color: #bc8036; + background-color: transparent; } .message-error { - background-color: #af2222; + background-color: rgba(180, 40, 40, 0.25); + border: 1px solid rgba(220, 60, 60, 0.5); + border-radius: 8px !important; + padding: 12px !important; } -.message-code-exe .message-body { - min-height: 5em; - width: 100%; - background-color: var(--color-panel); - border-radius: 0.5em; - margin-top: 0.5em; - padding: 0.3em; - font-family: var(--font-family-code); -} +/* Terminal styling moved to new terminal block above */ /* Agent and AI Info */ .agent-start { @@ -254,7 +334,7 @@ } .msg-kvps tr { - border-bottom: 1px solid rgba(255, 255, 255, 0.15); + border-bottom: none; } .msg-heading { @@ -369,6 +449,26 @@ } /* Media Queries */ +@media (min-width: 55em) { + #chat-history { + padding-left: max(var(--spacing-sm), calc((100% - 55em) / 2)) !important; + padding-right: max(var(--spacing-sm), calc((100% - 55em) / 2)) !important; + } +} + +@media (min-width: 1025px) and (max-width: 55em) { + #chat-history { + padding-left: var(--spacing-sm) !important; + padding-right: var(--spacing-sm) !important; + } +} + +@media (max-width: 1024px) { + #chat-history { + padding: var(--spacing-md) var(--spacing-sm) 0 !important; + } +} + @media (max-width: 640px) { /* New styles for mobile messages */ @@ -415,66 +515,16 @@ } .light-mode .msg-kvps tr { - border-bottom: 1px solid rgb(192 192 192 / 50%); -} - -.light-mode .message-default { - background-color: var(--color-panel); - color: #1a242f; -} - -.light-mode .message-agent { - background-color: var(--color-panel); - color: #356ca3; -} - -.light-mode .message-agent-response { - background-color: var(--color-panel); - color: #188216; -} - -.light-mode .message-agent-delegation { - background-color: var(--color-panel); - color: #12685e; -} - -.light-mode .message-tool { - background-color: var(--color-panel); - color: #1c3c88; -} - -.light-mode .message-code-exe { - background-color: var(--color-panel); - color: #6c43b0; -} - -.light-mode .message-browser { - background-color: var(--color-panel); - color: #6c43b0; -} - -.light-mode .message-info { - background-color: var(--color-panel); - color: #3f3f3f; -} - -.light-mode .message-util { - background-color: var(--color-panel); - color: #5b5540; -} - -.light-mode .message-warning { - background-color: var(--color-panel); - color: #8f4800; + border-bottom: none; } .light-mode .message-error { - background-color: var(--color-panel); + background-color: rgba(220, 60, 60, 0.15); + border: 1px solid rgba(180, 40, 40, 0.4); color: #8f1010; } .light-mode 
.message-user { - background-color: var(--color-panel); color: #4e4e4e; } @@ -484,7 +534,7 @@ } .message-agent-response .msg-content { - font-size: var(--font-size-smaller); + font-size: var(--font-size-larger); } .message-agent-response .msg-content img { @@ -569,6 +619,13 @@ border-radius: 0.3em; } +.msg-content hr { + border: 0; + border-top: 1px solid var(--color-border); + /* border-top-color: color-mix(in srgb, var(--color-text) 50%, transparent); */ + height: 0; +} + .msg-min-max-btns { opacity: 40%; position: absolute; @@ -583,10 +640,9 @@ margin-top: 0.5em; margin-bottom: 0.5em; display: inline-grid; - grid-template-columns: minmax(0, max-content); + grid-template-columns: minmax(0, 100%); grid-auto-rows: auto; max-width: 100%; - width: fit-content; gap: var(--spacing-xs); } @@ -597,29 +653,14 @@ .message-group-right { width: 100%; justify-content: end; + margin-top: 5em; + margin-bottom:5em; } .message-group-mid { margin-left: 2em; } -/* 1. FIRST child’s .message – clear ONLY bottom corners */ -.message-group > *:first-child:not(:last-child) > .message { - border-bottom-left-radius: var(--spacing-xxs); - border-bottom-right-radius: var(--spacing-xxs); -} - -/* 2. MIDDLE children’s .message – clear ALL corners */ -.message-group > *:not(:first-child):not(:last-child) > .message { - border-radius: var(--spacing-xxs); -} - -/* 3. LAST child’s .message – clear ONLY top corners */ -.message-group > *:last-child:not(:first-child) > .message { - border-top-left-radius: var(--spacing-xxs); - border-top-right-radius: var(--spacing-xxs); -} - .message-container { animation: fadeIn 0.5s; -webkit-animation: fadeIn 0.5s; @@ -630,7 +671,7 @@ .message { /* background-color: var(--color-message-bg); */ - border-radius: var(--border-radius); + /* border-radius: 0; */ padding: 0.9rem var(--spacing-sm) 0.7rem var(--spacing-sm); overflow-x: auto; width: auto; @@ -639,10 +680,10 @@ /* display: block; */ word-break: break-word; overflow-wrap: anywhere; + box-shadow: none; } /* shades */ .dark-mode .message { - box-shadow: inset 0 2rem 2rem -2rem rgba(0, 0, 0, 0.3), - inset 0 -2rem 2rem -2rem rgba(0, 0, 0, 0.1); + box-shadow: none; } diff --git a/webui/css/modals.css b/webui/css/modals.css index 61d22ffe07..4203ba4121 100644 --- a/webui/css/modals.css +++ b/webui/css/modals.css @@ -1,6 +1,42 @@ /* Modal Styles */ +/* Until the full transition of A0 modals is complete, +consider these classes as part of the old modal system that will be deprecated soon +(currently used only by Settings modal). 
.modal-content, .modal-container, .modal-overlay -/* Overlay */ +Do not maintain these classes, but be careful: +some classes like modal-header are shared between the old and the new system */ + +.modal { + display: none; + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: 2000; +} +.modal.show { + display: block; +} + +.modal-inner { + display: flex; + flex-direction: column; + overflow: hidden; + position: fixed; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + background-color: var(--color-background); + width: 90%; + max-width: 960px; + max-height: 90vh; + border-radius: 8px; + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); + z-index: 2; +} + +/* Overlay (old system) */ .modal-overlay { position: fixed; top: 0; @@ -14,7 +50,19 @@ z-index: 2001; } -/* Modal Container */ +/* Modal Backdrop */ +.modal-backdrop { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + background-color: rgba(0, 0, 0, 0.5); + z-index: 1; + cursor: pointer; +} + +/* Modal Container (old system) */ .modal-container { background-color: var(--color-panel); border-radius: 12px; @@ -27,24 +75,6 @@ box-sizing: border-box; } -.light-mode .modal-container { - background-color: var(--color-panel-light); -} - -/* Mobile Viewport Behavior */ -@media (max-width: 1280px) { - .modal-container { - width: 95%; /* Take up most of the screen on mobile */ - min-width: unset; /* Remove min-width constraints */ - max-width: 95%; /* Ensure consistent width */ - } - - /* Ensure section content can scroll horizontally */ - .section { - overflow-x: auto; - } -} - /* Modal Header */ .modal-header { display: grid; @@ -56,9 +86,7 @@ color: var(--color-primary); border-bottom: 1px solid var(--color-border); } - .modal-header h2 { - /* font-size: var(--font-size-large); */ margin: 0; } @@ -96,7 +124,6 @@ .modal-content { padding: 0.5rem 1.5rem 0 1.5rem; overflow-y: auto; - overflow-x: hidden; height: calc(90vh); flex-grow: 1; background-clip: border-box; @@ -106,123 +133,45 @@ box-sizing: border-box; } -.modal-content::-webkit-scrollbar { - width: 6px; - height: 6px; -} - -.modal-content::-webkit-scrollbar-track { - background: transparent; - margin: 4px 0; - border-radius: 6px; -} - -.modal-content::-webkit-scrollbar-thumb { - background-color: rgba(155, 155, 155, 0.5); - border-radius: 6px; - transition: background-color 0.2s ease; -} - -.modal-content::-webkit-scrollbar-thumb:hover { - background-color: rgba(155, 155, 155, 0.7); -} - -/* Full Screen Input Modal Styles */ -.full-screen-input-modal { - width: 90%; - max-width: 800px; - max-height: 80vh; - position: relative; - padding: 0; - background-color: rgb(20, 20, 20, 0.96); - border: 1.5px solid var(--color-border); -} - -.full-screen-input-modal h2 { - margin: 0; - padding: 0; - font-size: 1.1rem; - color: var(--color-text); - opacity: 0.8; +.modal-scroll { + max-height: 90vh; + overflow-y: auto; + padding: 0 1rem 1rem 1rem; } -.full-screen-input-modal .modal-close { +.modal-x { position: absolute; - top: 1.2rem; + top: 1rem; right: 1rem; - font-size: 1.5rem; - padding: 0 0.5rem; - line-height: 0.8; -} - -.full-screen-input-modal .btn-ok { - margin-right: 1rem; -} - -.full-screen-input-modal .modal-content { - height: calc(80vh); - padding: 0; - margin: 0; - overflow: hidden; -} - -.full-screen-input-modal .modal-footer { - background: transparent; - max-height: 50px; -} - -#full-screen-input { - width: 100%; - height: calc(100% - 50px); + background: none; border: none; - background-color: transparent; + font-size: 1.5rem; 
+ cursor: pointer; color: var(--color-text); - font-family: "Roboto Mono", monospace; - font-optical-sizing: auto; - font-size: 0.955rem; - padding: 1.2rem 1rem; - resize: none; - outline: none; + padding: 0.5rem; + line-height: 1; + z-index: 3; } -#full-screen-input::-webkit-scrollbar { +.modal-content::-webkit-scrollbar, .modal-scroll::-webkit-scrollbar { width: 6px; height: 6px; } - -#full-screen-input::-webkit-scrollbar-track { +.modal-content::-webkit-scrollbar-track, .modal-scroll::-webkit-scrollbar-track { background: transparent; - margin: 14px; + margin: 4px 0; border-radius: 6px; } - -#full-screen-input::-webkit-scrollbar-thumb { +.modal-content::-webkit-scrollbar-thumb, .modal-scroll::-webkit-scrollbar-thumb { background-color: rgba(155, 155, 155, 0.5); border-radius: 6px; - -webkit-transition: background-color 0.2s ease; transition: background-color 0.2s ease; } - -#full-screen-input::-webkit-scrollbar-thumb:hover { +.modal-content::-webkit-scrollbar-thumb:hover, .modal-scroll::-webkit-scrollbar-thumb:hover { background-color: rgba(155, 155, 155, 0.7); } -.light-mode .full-screen-input-modal { - background-color: rgb(220, 220, 220, 0.86); -} - -.full-screen-input-modal .modal-footer { - padding: 1rem 0; - border-top: none; - background: transparent; -} - -/* Buttons Container */ -#buttons-container { - display: flex; - gap: 0.875rem !important; -} - +/* Modal with footer support */ .modal-footer { display: flex; justify-content: flex-end; @@ -232,6 +181,20 @@ background: var(--color-background); gap: 1rem; } +.modal-inner.modal-with-footer { + display: flex; + flex-direction: column; +} +.modal-inner.modal-with-footer .modal-scroll { + flex: 1; + min-height: 0; + overflow-y: auto; +} +.modal-footer-slot { + flex-shrink: 0; + border-top: 1px solid var(--color-border); + background: var(--color-background); +} /* Section Styles */ .section { @@ -241,19 +204,41 @@ border: 1px solid var(--color-border); border-radius: 0.5rem; } - .section-title { font-size: 1.25rem; font-weight: bold; color: var(--color-primary); margin-bottom: 0.5rem; } - .section-description { color: var(--color-text); margin-bottom: 1rem; } +/* Buttons Container */ +.modal-button-container { + display: flex; + justify-content: space-between; + gap: 10px; + width: 100%; + margin-top: 20px; +} +.modal-button-container .button { + flex: 1; + min-width: 0; + padding: 10px; + text-align: center; + white-space: nowrap; + font-size: 0.9em; + display: flex; + justify-content: center; + align-items: center; +} +#buttons-container { + display: flex; + gap: 0.875rem !important; +} + /* Button Styles */ .btn { font-weight: 500; @@ -264,22 +249,17 @@ font-size: 0.875rem; font-family: "Rubik", Arial, Helvetica, sans-serif; } - .btn.slim { padding: 0.2em 0.4em; } - .btn.primary { background: #2196f3; color: white; width: fit-content; } - .btn:disabled { - /* background: #ccc; */ cursor: not-allowed; } - .btn-ok { background: #4248f1; color: white; @@ -288,122 +268,47 @@ gap: 0.5rem; transition: background 0.3s ease-in-out; } - .btn-ok > svg { max-width: 20px; } - .btn-ok:hover { background: #353bc5; } - .btn-ok:active { background: #2b309c; } - .btn-cancel { background: transparent; color: var(--color-accent); border: 0.15rem solid var(--color-accent); transition: background 0.3s ease-in-out, color 0.3s ease-in-out; } - .btn-cancel:hover { background: var(--color-accent); color: var(--color-text); } - .btn-cancel:active { background: #a94658; color: var(--color-text); } - .light-mode .btn-cancel:hover { background: 
var(--color-accent); color: var(--color-background); } - .light-mode .btn-cancel:active { background: #a94658; color: var(--color-background); } - .btn-field { background: #2196f3; color: white; width: fit-content; } - .btn-field:disabled { background: #ccc; cursor: not-allowed; } -/* Typography */ -/* h2 { - color: var(--color-primary); -} */ - -/* Responsive Design */ -@media (max-width: 768px) { - .modal-header { - padding-left: 1.1rem; - text-wrap: nowrap; - } - - .modal-content { - padding: 0.5rem; - overflow-y: auto; - flex-grow: 1; - } - - .modal-footer { - padding: var(--spacing-sm) 0 var(--spacing-sm) 0 !important; - } - - .section { - margin-bottom: 1.5rem; - padding: 1rem; - padding-bottom: 0; - border: 1px solid var(--color-border); - border-radius: 0.5rem; - } - - #buttons-container { - margin: 0 auto; - } - - .btn { - padding: 0.5rem 1.7rem; - } -} - -@media (max-width: 540px) { - .modal-header h2 { - font-size: var(--font-size-normal); - margin: 0; - } - - #buttons-container { - max-height: 50px; - } - - .btn { - text-wrap: wrap; - font-size: var(--font-size-small); - } - - .btn-upload { - margin: 0 auto; - gap: 0.5rem; - align-items: center; - } - - .btn-upload > svg { - width: 20px; - } -} - /* Editor Toolbar */ .editor-toolbar { display: flex; @@ -414,11 +319,9 @@ background-color: rgba(30, 30, 30, 0.95); border-bottom: 1px solid var(--color-border); } - .light-mode .editor-toolbar { background-color: rgba(240, 240, 240, 0.95); } - .toolbar-group { display: flex; align-items: center; @@ -426,11 +329,9 @@ padding: 0 0.5rem; border-right: 1px solid var(--color-border); } - .toolbar-group:last-child { border-right: none; } - .toolbar-button { display: flex; align-items: center; @@ -446,38 +347,31 @@ cursor: pointer; transition: all 0.2s ease; } - .toolbar-button svg { width: 18px; height: 18px; } - .toolbar-button:hover { opacity: 1; background-color: rgba(255, 255, 255, 0.1); } - .toolbar-button:active { transform: translateY(1px); } - .toolbar-button.active { background-color: rgba(255, 255, 255, 0.15); border-color: var(--color-border); opacity: 1; } - .toolbar-button:disabled { opacity: 0.3; cursor: not-allowed; } - .toolbar-button:disabled:hover { background-color: transparent; } /* Range Input Styles */ - input[type="range"] { width: 100%; cursor: pointer; @@ -487,28 +381,147 @@ input[type="range"] { background: var(--color-border); border-radius: 2px; outline: none; + transition: all 0.2s ease; } - input[type="range"]::-webkit-slider-thumb { -webkit-appearance: none; appearance: none; width: 16px; height: 16px; - background: var(--color-text); + background: var(--color-primary); border-radius: 50%; cursor: pointer; + transition: all 0.2s ease; } input[type="range"]::-moz-range-thumb { width: 16px; height: 16px; - background: var(--color-text); + background: var(--color-primary); border-radius: 50%; cursor: pointer; border: none; + transition: all 0.2s ease; } .range-value { min-width: 3em; text-align: right; -} \ No newline at end of file +} + +/* Light mode overrides */ +.light-mode input[type="range"]::-webkit-slider-thumb { + background: #777; +} +.light-mode input[type="range"]::-moz-range-thumb { + background: #777; +} + +/* Responsive Design */ +@media (max-width: 1280px) { + .modal-container { + width: 95%; + min-width: unset; /* Remove min-width constraints */ + max-width: 95%; + } + .section { + overflow-x: auto; + } +} +@media (max-width: 768px) { + .modal-header { + padding-left: 1.1rem; + text-wrap: nowrap; + } + .modal-content { + padding: 0.5rem; + 
overflow-y: auto; + flex-grow: 1; + } + .modal-footer { + padding: var(--spacing-sm) !important; + } + .section { + margin-bottom: 1.5rem; + padding: 1rem; + padding-bottom: 0; + border: 1px solid var(--color-border); + border-radius: 0.5rem; + } + #buttons-container { + margin: 0 auto; + } + .btn { + padding: 0.5rem 1.7rem; + } +} +@media (max-width: 540px) { + .modal-header h2 { + font-size: var(--font-size-normal); + margin: 0; + } + #buttons-container { + max-height: 50px; + } + .btn { + text-wrap: wrap; + font-size: var(--font-size-small); + } + .btn-upload { + margin: 0 auto; + gap: 0.5rem; + align-items: center; + } + .btn-upload > svg { + width: 20px; + } +} + +.loading { + width: calc(100% - 4rem); + max-width: 1200px; + min-height: 50px; + border-radius: 12px; + background: var(--color-border); + position: relative; + overflow: hidden; + margin: 2rem auto; + opacity: 0; + animation: fadeIn 500ms ease-out 500ms forwards; +} + +@keyframes fadeIn { + 0% { + opacity: 0; + } + 100% { + opacity: 1; + } +} + +.loading::before { + content: ""; + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: linear-gradient( + 90deg, + var(--color-background), + var(--color-border), + var(--color-background) + ); + animation: shimmer 2s infinite; + animation-delay: 250ms; + background-size: 200% 100%; +} + +@keyframes shimmer { + 0% { + background-position: 200% 0; + } + 100% { + background-position: -200% 0; + } +} diff --git a/webui/css/modals2.css b/webui/css/modals2.css deleted file mode 100644 index 804df88ec9..0000000000 --- a/webui/css/modals2.css +++ /dev/null @@ -1,190 +0,0 @@ -/* Modal Styles */ -.modal { - display: none; - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; - z-index: 2000; -} - -.modal.show { - display: block; -} - -.modal-backdrop { - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; - background-color: rgba(0, 0, 0, 0.5); - z-index: 1; - cursor: pointer; -} - -.modal-inner { -display: flex; - flex-direction: column; - overflow: hidden; - position: fixed; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - background-color: var(--color-background-light); - width: 90%; - max-width: 960px; - max-height: 90vh; - border-radius: 8px; - box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); - z-index: 2; -} - -.dark-mode .modal-inner { - background-color: var(--color-background-dark); - box-shadow: 0 4px 6px rgba(0, 0, 0, 0.3); -} - -/* Mobile Viewport Behavior */ -@media (max-width: 1280px) { - .modal-inner { - width: 95%; /* Take up most of the screen on mobile */ - min-width: unset; /* Remove min-width constraints */ - max-width: 95%; /* Ensure consistent width */ - } - - /* Ensure section content can scroll horizontally */ - .section { - overflow-x: auto; - } - } - -.modal-scroll { - max-height: 90vh; - overflow-y: auto; - padding: 0 1rem 1rem 1rem; -} - -.modal-x { - position: absolute; - top: 1rem; - right: 1rem; - background: none; - border: none; - font-size: 1.5rem; - cursor: pointer; - color: var(--text-primary-light); - padding: 0.5rem; - line-height: 1; - z-index: 3; -} - -.dark-mode .modal-x { - color: var(--text-primary-dark); -} - -.modal-title { - /* margin: 0 0 1.5rem 0; - padding-right: 3rem; - color: var(--text-primary-light); - font-size: 1.75rem; - border-bottom: 1px solid rgba(0, 0, 0, 0.1); - padding-bottom: 1rem; */ -} - -.dark-mode .modal-title { - color: var(--text-primary-dark); - border-bottom-color: rgba(255, 255, 255, 0.1); -} - -.modal-button-container { - display: flex; - 
justify-content: space-between; - gap: 10px; - width: 100%; - margin-top: 20px; -} - -.modal-button-container .button { - flex: 1; - min-width: 0; - padding: 10px; - text-align: center; - white-space: nowrap; - font-size: 0.9em; - display: flex; - justify-content: center; - align-items: center; -} - -.loading { - width: calc(100% - 4rem); - max-width: 1200px; - min-height: 50px; - border-radius: 12px; - position: relative; - overflow: hidden; - margin: 2rem auto; - opacity: 0; - animation: fadeIn 500ms ease-out 500ms forwards; -} - -.light-mode .loading { - background: var(--color-border-light); -} - -.dark-mode .loading { - background: var(--color-border-dark); -} - -@keyframes fadeIn { - 0% { - opacity: 0; - } - 100% { - opacity: 1; - } -} - -.loading::before { - content: ""; - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; -} - -.dark-mode .loading::before { - background: linear-gradient( - 90deg, - var(--color-background-dark), - var(--color-border-dark), - var(--color-background-dark) - ); - animation: shimmer 2s infinite; - animation-delay: 250ms; - background-size: 200% 100%; -} - -.light-mode .loading::before { - background: linear-gradient( - 90deg, - var(--color-background-light), - color-mix(in srgb, var(--color-border-light) 95%, black), - var(--color-background-light) - ); - animation: shimmer 2s infinite; - animation-delay: 250ms; - background-size: 200% 100%; -} - -@keyframes shimmer { - 0% { - background-position: 200% 0; - } - 100% { - background-position: -200% 0; - } -} diff --git a/webui/css/scheduler.css b/webui/css/scheduler.css new file mode 100644 index 0000000000..ad12b482cd --- /dev/null +++ b/webui/css/scheduler.css @@ -0,0 +1,704 @@ +/* Scheduler Styles */ + +/* ========================================================================== + Status Badges + ========================================================================== */ + +.scheduler-status-badge { + display: inline-block; + padding: 4px 8px; + border-radius: 4px; + font-size: 12px; + font-weight: 500; + text-transform: capitalize; + white-space: nowrap; + cursor: pointer; +} + +.scheduler-status-idle { + background-color: rgba(0, 100, 0, 0.2); + color: #2a9d8f; + border: 1px solid rgba(42, 157, 143, 0.3); +} + +.scheduler-status-running { + background-color: rgba(0, 60, 120, 0.2); + color: #4361ee; + border: 1px solid rgba(67, 97, 238, 0.3); +} + +.scheduler-status-disabled { + background-color: rgba(70, 70, 70, 0.2); + color: #6c757d; + border: 1px solid rgba(108, 117, 125, 0.3); +} + +.scheduler-status-error { + background-color: rgba(120, 0, 0, 0.2); + color: #e63946; + border: 1px solid rgba(230, 57, 70, 0.3); +} + +.scheduler-status-badge-small { + font-size: 10px; + padding: 2px 6px; + margin-right: 5px; + min-width: 40px; + text-align: center; +} + +/* Status badge selected state */ +.scheduler-status-selected { + opacity: 1 !important; + box-shadow: 0 0 0 2px var(--color-bg), 0 0 0 4px var(--color-border); + border: 1px solid var(--color-border) !important; + outline: none; +} + +/* Light mode status badges */ +.light-mode .scheduler-status-idle { + background-color: rgba(42, 157, 143, 0.1); + color: #1a6f65; +} + +.light-mode .scheduler-status-running { + background-color: rgba(67, 97, 238, 0.1); + color: #2540b3; +} + +.light-mode .scheduler-status-disabled { + background-color: rgba(108, 117, 125, 0.1); + color: #495057; +} + +.light-mode .scheduler-status-error { + background-color: rgba(230, 57, 70, 0.1); + color: #c5283d; +} + +.light-mode 
.scheduler-status-selected { + box-shadow: 0 0 0 2px var(--color-bg-light), 0 0 0 4px var(--color-accent); +} + +/* ========================================================================== + Container & Layout + ========================================================================== */ + +.scheduler-container { + width: 100%; + box-sizing: border-box; + display: block; + padding: 0.5rem 0; +} + +.scheduler-loading { + text-align: center; + padding: 40px 0; + color: var(--color-text-secondary); +} + +.task-container-vertical { + display: flex; + flex-direction: column; + width: 100%; + gap: 6px; +} + +/* ========================================================================== + Task List Table + ========================================================================== */ + +.scheduler-task-list { + width: 100%; + min-width: 100%; + margin: 0; + border-collapse: separate; + border-spacing: 0; + white-space: nowrap; + padding-bottom: 8px; + table-layout: fixed; +} + +.scheduler-task-list th, +.scheduler-task-list td { + padding: 8px 12px; + text-align: left; + vertical-align: middle; + border-bottom: 1px solid var(--color-border); +} + +/* Column styles */ +.scheduler-task-list .col-name { + width: 30%; + overflow: hidden; + text-overflow: ellipsis; +} + +.scheduler-task-list .col-state { + width: 12%; +} + +.scheduler-task-list .col-project { + width: 20%; +} + +.scheduler-task-list .col-lastrun { + width: 20%; +} + +.scheduler-task-list .col-actions { + width: 18%; + text-align: right; +} + +/* Task actions container */ +.scheduler-task-actions { + display: flex; + justify-content: flex-end; + gap: 10px; + flex-wrap: nowrap; +} + +/* Task action buttons */ +.scheduler-task-action { + display: inline-flex; + align-items: center; + justify-content: center; + background-color: transparent; + border: 1px solid var(--color-border); + color: var(--color-text); + padding: 4px; + border-radius: 4px; + cursor: pointer; + transition: all 0.2s ease; + width: 28px; + height: 28px; + flex-shrink: 0; +} + +.scheduler-task-action:hover { + background-color: var(--color-secondary); +} + +/* ========================================================================== + Form Styles + ========================================================================== */ + +.scheduler-form { + display: flex; + flex-direction: column; + gap: 1.5rem; + padding: 1rem 0; +} + +.scheduler-form-header { + display: flex; + padding-bottom: var(--spacing-sm); + border-bottom: 1px solid var(--color-border); + justify-content: space-between; +} + +.scheduler-form-title { + font-size: 1.25rem; + font-weight: bold; + color: var(--color-primary); + margin: 0; +} + +.scheduler-form-actions { + display: flex; + gap: 0.8rem; + justify-content: flex-end; + align-items: center; +} + +.scheduler-form-grid { + display: grid; + grid-template-columns: 1fr; + gap: 1.5rem; + overflow-x: auto; +} + +.scheduler-form-field { + display: grid; + grid-template-columns: 1fr 2fr; + gap: 1rem; + align-items: flex-start; +} + +.full-width { + grid-column: 1 / -1; +} + +.scheduler-form-label { + font-weight: bold; + color: var(--color-primary); + margin-bottom: 0.25rem; +} + +.scheduler-form-help { + color: var(--color-text); + font-size: 0.875rem; + opacity: 0.8; + margin: 0.25rem 0 0.5rem 0; +} + +/* Label and help text wrapper for tighter grouping */ +.label-help-wrapper { + display: flex; + flex-direction: column; + gap: 2px; + margin-bottom: 0.5rem; +} + +.label-help-wrapper .scheduler-form-label { + margin-bottom: 2px; +} + 
+.label-help-wrapper .scheduler-form-help { + margin-top: 0; + margin-bottom: 0; +} + +/* ========================================================================== + Detail View + ========================================================================== */ + +.scheduler-detail-view { + background-color: var(--color-bg-secondary); + border-radius: 4px; + margin-bottom: var(--spacing-md); + animation: fadeIn 0.3s ease; +} + +.scheduler-detail-header { + display: flex; + justify-content: space-between; + align-items: center; + flex-wrap: wrap; + gap: 10px; + margin: var(--spacing-md) 0; + width: 100%; +} + +.scheduler-detail-header-left { + display: flex; + align-items: center; + gap: 10px; + flex-wrap: wrap; +} + +.scheduler-detail-header-left .scheduler-detail-title { + margin: 0; +} + +.scheduler-detail-header .scheduler-detail-title { + font-size: 1.4rem; + font-weight: 500; + margin: 0; + margin-right: auto; +} + +.scheduler-detail-header .scheduler-status-badge { + margin-right: 10px; +} + +.scheduler-detail-header-actions { + display: flex; + align-items: center; + gap: 0.5rem; +} + +.scheduler-detail-header .btn-action-header { + padding: 0.4rem 0.6rem; + display: flex; + align-items: center; + justify-content: space-between; + background: var(--color-bg-secondary); + border: 1px solid var(--color-border); + color: var(--color-text); + cursor: pointer; +} + +.scheduler-detail-header .btn-action-header .btn-cancel { + margin-left: var(--spacing-sm); +} + +.scheduler-detail-header .btn-action-header:hover { + border-color: var(--color-primary); + color: var(--color-primary); +} + +.scheduler-detail-header .btn-action-header.danger:hover { + border-color: var(--color-accent); + color: var(--color-accent); +} + +.scheduler-detail-header .btn-action-header .material-symbols-outlined { + font-size: 18px; +} + +.scheduler-detail-content { + margin-bottom: 20px; +} + +.scheduler-details-grid { + display: grid; + grid-template-columns: 120px 1fr; + gap: 8px 16px; + margin-top: var(--spacing-md); +} + +.scheduler-details-label { + font-weight: 500; + color: var(--color-text-secondary); + display: flex; + align-items: center; +} + +.scheduler-details-value { + color: var(--color-text); + word-break: break-word; +} + +.scheduler-details-actions { + display: flex; + justify-content: flex-end; + gap: 8px; +} + +/* ========================================================================== + State Selector + ========================================================================== */ + +.scheduler-state-selector { + display: flex; + gap: 10px; + flex-wrap: wrap; +} + +.scheduler-state-selector .scheduler-status-badge { + cursor: pointer; + transition: all 0.2s ease; + opacity: 0.5; + border: 1px solid transparent; +} + +.scheduler-state-selector .scheduler-status-badge:hover { + opacity: 0.9; +} + +/* Make status badges in selector more prominent */ +.scheduler-state-selector .scheduler-status-idle, +.scheduler-state-selector .scheduler-status-running, +.scheduler-state-selector .scheduler-status-disabled, +.scheduler-state-selector .scheduler-status-error { + font-weight: 600; + padding: 6px 12px; +} + +/* State explanation styling */ +.scheduler-state-explanation { + margin-top: 10px; + font-size: 0.85rem; + color: var(--color-text-secondary); + line-height: 1.4; + min-height: 1.4em; + transition: all 0.2s ease; +} + +.scheduler-state-explanation span { + display: block; + padding: 4px 8px; + background-color: rgba(0, 0, 0, 0.05); + border-radius: 4px; + margin-top: 8px; +} + +.light-mode 
.scheduler-state-explanation span { + background-color: rgba(255, 255, 255, 0.3); +} + +/* ========================================================================== + Schedule Builder (for scheduled tasks) + ========================================================================== */ + +.scheduler-schedule-builder { + display: grid; + grid-template-columns: repeat(5, 1fr); + gap: 10px; + width: 100%; + margin-bottom: 10px; +} + +.scheduler-schedule-field { + display: flex; + flex-direction: column; + gap: 4px; + max-width: 70px; +} + +.scheduler-schedule-field input { + width: 100%; + min-width: 0; + font-size: 0.9rem; +} + +.scheduler-schedule-label { + font-size: 0.8rem; + margin-bottom: 5px; + color: var(--color-text); + opacity: 0.8; +} + +.scheduler-no-schedule { + color: var(--color-text-secondary); + opacity: 0.7; + font-style: italic; +} + +/* ========================================================================== + Plan Builder (for planned tasks) + ========================================================================== */ + +.scheduler-plan-builder { + width: 100%; + margin-bottom: 10px; + border: 1px solid var(--color-border); + border-radius: 8px; + padding: 10px; + background-color: rgba(0, 0, 0, 0.2); +} + +.scheduler-plan-todo { + display: flex; + flex-direction: column; +} + +.scheduler-plan-label { + font-size: 0.9rem; + margin-bottom: 10px; + color: var(--color-text); + font-weight: bold; +} + +.scheduler-todo-list { + display: flex; + flex-direction: column; + gap: 10px; + margin-top: 8px; + max-height: 200px; + overflow-y: auto; +} + +.scheduler-todo-item { + display: flex; + align-items: center; + justify-content: space-between; + background-color: var(--color-background); + border-radius: 6px; + padding: 8px 12px; + border: 1px solid var(--color-border); + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05); +} + +.scheduler-todo-item span { + flex: 1; + font-size: 14px; +} + +.scheduler-add-todo { + margin-top: 12px; + display: flex; + gap: 8px; + align-items: center; +} + +.scheduler-add-todo input { + flex: 1; +} + +.scheduler-add-todo input[type="datetime-local"] { + flex: 1; + min-width: 0; + padding: 8px 12px; + border-radius: 6px; + border: 1px solid var(--color-border); + background-color: var(--color-background); + color: var(--color-text); +} + +.scheduler-add-todo-button { + display: flex; + align-items: center; + justify-content: center; + background-color: var(--color-accent); + color: white; + border: none; + border-radius: 6px; + padding: 8px 12px; + cursor: pointer; + transition: background-color 0.2s ease; + font-weight: 500; +} + +.scheduler-add-todo-button:hover { + background-color: var(--color-accent-dark); +} + +.scheduler-todo-remove { + display: flex; + align-items: center; + justify-content: center; + background-color: transparent; + color: var(--color-text); + border: none; + border-radius: 4px; + width: 24px; + height: 24px; + cursor: pointer; + transition: background-color 0.2s ease; + margin-left: 8px; +} + +.scheduler-todo-remove:hover { + background-color: var(--color-accent-light); + color: var(--color-accent-dark); +} + +.scheduler-empty-plan { + padding: 12px; + color: var(--color-text-muted); + font-style: italic; + text-align: center; + border: 1px dashed var(--color-border); + border-radius: 6px; + margin-top: 8px; +} + +.light-mode .scheduler-todo-item { + background-color: var(--color-background-light); + border-color: var(--color-border-light); +} + +.light-mode .scheduler-todo-remove:hover { + background-color: #e0e0e0; + color: 
#d32f2f; +} + +/* ========================================================================== + Input Group (for token field with generate button) + ========================================================================== */ + +.input-group { + display: flex; + gap: 8px; + width: 100%; +} + +.input-group input[type="text"] { + flex: 1; + min-width: 0; +} + +/* Specific styling for the Generate button in token field */ +.input-group .scheduler-task-action { + white-space: nowrap; + padding: 4px 10px; + width: auto; + height: auto; + background-color: var(--color-secondary); + font-size: 0.9rem; +} + +.input-group .scheduler-task-action:hover { + background-color: var(--color-accent); + color: var(--color-bg); +} + +/* Ensure parent container allows proper flow */ +.scheduler-form-field .input-group { + max-width: 100%; + overflow: hidden; +} + +/* ========================================================================== + Responsive Design + ========================================================================== */ + +@media (max-width: 768px) { + /* Task list column visibility */ + .scheduler-task-list .col-project, + .scheduler-task-list .col-lastrun { + display: none; + } + + .scheduler-task-list .col-name { + width: 50%; + } + + .scheduler-task-list .col-state { + width: 25%; + } + + .scheduler-task-list .col-actions { + width: 25%; + } + + /* Form responsiveness */ + .scheduler-form-header { + flex-direction: column; + align-items: flex-start; + gap: 1rem; + } + + .scheduler-form-actions { + align-self: flex-end; + } + + .scheduler-form-field { + grid-template-columns: 1fr; + gap: 0.5rem; + } + + .scheduler-form-grid { + grid-template-columns: 1fr; + } + + /* Detail header responsiveness */ + .scheduler-detail-header { + gap: 0.75rem; + } + + .scheduler-detail-actions { + width: 100%; + flex-wrap: wrap; + gap: 0.5rem; + } + + /* Schedule builder responsiveness */ + .scheduler-schedule-builder { + grid-template-columns: 1fr 1fr; + } + + /* Input group responsiveness */ + .input-group .scheduler-task-action { + padding: 4px 8px; + font-size: 0.8rem; + } + + /* Plan builder responsiveness */ + .scheduler-add-todo { + flex-direction: column; + } +} + +@media (max-width: 480px) { + .input-group { + flex-direction: column; + } + + .input-group .scheduler-task-action { + align-self: flex-start; + } +} diff --git a/webui/css/settings.css b/webui/css/settings.css index 871b364016..12e37c79d5 100644 --- a/webui/css/settings.css +++ b/webui/css/settings.css @@ -47,7 +47,7 @@ textarea, select { width: 100%; padding: 0.5rem; - border: 1px solid var(--color-secondary); + border: 1px solid var(--color-border); border-radius: 0.25rem; background-color: var(--color-background); color: var(--color-text); @@ -56,22 +56,27 @@ select { transition: all 0.3s ease; } -input[type="text"]:focus, -input[type="password"]:focus { - opacity: 1; -} - textarea { min-height: 100px; font-family: 'Roboto Mono', monospace; scroll-behavior: smooth; resize: none; background-clip: border-box; - border: 6px solid transparent; } +input[type="text"]:focus, +input[type="number"]:focus, +input[type="password"]:focus, textarea:focus { - background-color: #151515; + background-color: var(--color-background); + filter: brightness(1.2); +} + +.light-mode input[type="text"]:focus, +.light-mode input[type="number"]:focus, +.light-mode input[type="password"]:focus, +.light-mode textarea:focus { + filter: brightness(1.1); } /* Button Disabled State */ @@ -281,12 +286,10 @@ nav ul li a img { .settings-tabs::before { left: 0; - 
background: linear-gradient(to right, var(--color-panel), transparent); } .settings-tabs::after { right: 0; - background: linear-gradient(to left, var(--color-panel), transparent); } .settings-tab { @@ -303,7 +306,6 @@ nav ul li a img { z-index: 1; min-width: min-content; width: auto; - max-width: 100px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; @@ -324,28 +326,10 @@ nav ul li a img { -4px 0 8px -2px var(--color-border); */ font-weight: bold; background-color: var(--color-panel); + min-width: min-content; + max-width: 150px; } -/* Light mode overrides */ -.light-mode .settings-tab.active { - /* color: var(--color-border); */ - /* box-shadow: - 0 -4px 8px -2px var(--color-border), - 4px 0 8px -2px var(--color-border), - -4px 0 8px -2px var(--color-border); */ -} - -.light-mode .settings-tab:not(.active) { - background-color: rgba(0, 0, 0, 0.03); -} - -.light-mode .settings-tabs::before { - background: linear-gradient(to right, var(--color-panel-light), transparent); -} - -.light-mode .settings-tabs::after { - background: linear-gradient(to left, var(--color-panel-light), transparent); -} /* Responsive Design for Settings Tabs */ @media (max-width: 640px) { @@ -368,131 +352,12 @@ nav ul li a img { .settings-tab { flex: 0 0 auto; text-align: center; - min-width: 60px; - max-width: 80px; + min-width: min-content; + max-width: 150px; } -} - -/* Scheduler Task List - updated with guaranteed width handling */ -.scheduler-task-list { - width: 100%; - min-width: 100%; - margin: 0; - border-collapse: separate; - border-spacing: 0; - white-space: nowrap; - padding-bottom: 8px; - table-layout: auto; -} - -.scheduler-task-list th, -.scheduler-task-list td { - padding: 8px 12px; - text-align: left; - vertical-align: middle; - border-bottom: 1px solid var(--color-border); -} - -/* Ensure columns have proper min-width */ -.scheduler-task-list th:nth-child(1), -.scheduler-task-list td:nth-child(1) { - min-width: 150px; - max-width: 200px; - overflow: hidden; - text-overflow: ellipsis; -} - -.scheduler-task-list th:nth-child(2), -.scheduler-task-list td:nth-child(2) { - min-width: 100px; -} - -.scheduler-task-list th:nth-child(3), -.scheduler-task-list td:nth-child(3) { - min-width: 100px; -} - -.scheduler-task-list th:nth-child(4), -.scheduler-task-list td:nth-child(4) { - min-width: 150px; -} - -.scheduler-task-list th:nth-child(5), -.scheduler-task-list td:nth-child(5) { - min-width: 180px; -} - -.scheduler-task-list th:nth-child(6), -.scheduler-task-list td:nth-child(6) { - min-width: 160px; - white-space: nowrap; -} - -/* Task actions container */ -.scheduler-task-actions { - display: flex; - justify-content: flex-end; - gap: 10px; - flex-wrap: nowrap; -} - -/* Scheduler form styles */ -.scheduler-form { - display: flex; - flex-direction: column; - gap: 1.5rem; - padding: 1rem 0; -} - -.scheduler-form-header { - display: flex; - justify-content: space-between; - align-items: center; - margin-bottom: 1.5rem; -} - -.scheduler-form-title { - font-size: 1.25rem; - font-weight: bold; - color: var(--color-primary); - margin: 0; -} - -.scheduler-form-actions { - display: flex; - gap: 0.8rem; - justify-content: flex-end; - align-items: center; -} - -.scheduler-form-grid { - display: grid; - grid-template-columns: 1fr; - gap: 1.5rem; - overflow-x: auto; -} - -.scheduler-form-field { - display: grid; - grid-template-columns: 1fr 2fr; - gap: 1rem; - align-items: flex-start; -} - -@media (max-width: 768px) { - .scheduler-form-header { - flex-direction: column; - align-items: flex-start; - 
gap: 1rem; - } - - .scheduler-form-actions { - align-self: flex-end; - } - - .scheduler-form-field { - grid-template-columns: 1fr; - gap: 0.5rem; + .settings-tab.active { + min-width: min-content; + max-width: 150px; } } @@ -503,8 +368,8 @@ nav ul li a img { padding-bottom: 0; border: 1px solid var(--color-border); border-radius: 0.5rem; - overflow-x: visible; /* Desktop: No horizontal scroll */ - width: 100%; /* Fill available width */ + overflow-x: visible; + width: 100%; min-width: min-content; display: block; box-sizing: border-box; @@ -522,50 +387,10 @@ nav ul li a img { margin-bottom: 1rem; } -/* Scheduler container - updated with guaranteed width handling */ -.scheduler-container { - width: 100%; - box-sizing: border-box; - display: block; - padding: 0.5rem 0; -} - -/* Scheduler task actions and buttons */ -.scheduler-task-action { - display: inline-flex; - align-items: center; - justify-content: center; - background-color: transparent; - border: 1px solid var(--color-border); - color: var(--color-text); - padding: 4px; - border-radius: 4px; - cursor: pointer; - transition: all 0.2s ease; - width: 28px; - height: 28px; - flex-shrink: 0; -} - -.scheduler-task-action:hover { - background-color: var(--color-secondary); -} - -/* Adjust media queries to handle small screens */ -@media (max-width: 768px) { - .scheduler-task-list { - min-width: 700px; - } - - .scheduler-detail-view { - min-width: 650px; - } -} - -/* Scrollbar styling for better visibility */ +/* Scrollbar styling for sections */ .section::-webkit-scrollbar { - height: 10px; /* Taller scrollbar for better usability */ - background-color: rgba(0,0,0,0.1); + height: 10px; + background-color: rgba(0, 0, 0, 0.1); } .section::-webkit-scrollbar-thumb { @@ -577,20 +402,10 @@ nav ul li a img { background-color: rgba(155, 155, 155, 0.9); } -/* Mobile styles for scheduler sections */ @media (max-width: 1280px) { - .scheduler-container { - min-width: max-content; /* Allow expansion based on content */ - } - - .scheduler-task-list { - min-width: max-content; /* Expand to fit content if needed */ - } - - /* Scrollbar styling for mobile view */ .section::-webkit-scrollbar { height: 10px; - background-color: rgba(0,0,0,0.1); + background-color: rgba(0, 0, 0, 0.1); } .section::-webkit-scrollbar-thumb { @@ -603,338 +418,34 @@ nav ul li a img { } } -/* Scheduler form field styling to match standard field styling */ -.scheduler-form-label { - font-weight: bold; - color: var(--color-primary); - margin-bottom: 0.25rem; /* Add consistent spacing between label and help text */ -} - -.scheduler-form-help { - color: var(--color-text); - font-size: 0.875rem; - opacity: 0.8; - margin: 0.25rem 0 0.5rem 0; /* Match the spacing of field-description */ -} - -/* Label and help text wrapper for tighter grouping */ -.label-help-wrapper { - margin-bottom: 0.5rem; -} - -.label-help-wrapper .scheduler-form-label { - margin-bottom: 2px; -} - -.label-help-wrapper .scheduler-form-help { - margin-top: 0; - margin-bottom: 0; -} - -/* Scheduler detail header styling */ -.scheduler-detail-header { +.select-with-custom { display: flex; - justify-content: flex-start; - align-items: center; - flex-wrap: wrap; - gap: 10px; + flex-direction: column; + gap: 0.5rem; width: 100%; } -.scheduler-detail-header .scheduler-detail-title { - margin-right: auto; -} - -/* Responsive adjustments for headers */ -@media (max-width: 768px) { - .scheduler-form-header { - flex-direction: column; - align-items: flex-start; - gap: 1rem; - } - - .scheduler-form-actions { - align-self: 
flex-end; - } - - .scheduler-detail-header { - flex-direction: row; /* Keep in row even on mobile */ - align-items: center; - flex-wrap: wrap; - gap: 0.5rem; - } - - .scheduler-detail-header .btn { - margin-left: auto; /* Push to right edge */ - } - - .scheduler-form-field { - grid-template-columns: 1fr; - gap: 0.5rem; - } -} - -/* Input group for token field with generate button */ -.input-group { - display: flex; - gap: 8px; +.select-with-custom select { width: 100%; } -.input-group input[type="text"] { - flex: 1; - min-width: 0; /* Allows the input to shrink below its content size */ -} - -/* Specific styling for the Generate button in token field */ -.input-group .scheduler-task-action { - white-space: nowrap; - padding: 4px 10px; - width: auto; - height: auto; - background-color: var(--color-secondary); - font-size: 0.9rem; -} - -.input-group .scheduler-task-action:hover { - background-color: var(--color-accent); - color: var(--color-bg); -} - -/* Ensure parent container allows proper flow */ -.scheduler-form-field .input-group { - max-width: 100%; - overflow: hidden; -} - -/* Adjustments for mobile */ -@media (max-width: 768px) { - .input-group .scheduler-task-action { - padding: 4px 8px; - font-size: 0.8rem; - } -} - -@media (max-width: 480px) { - .input-group { - flex-direction: column; - } - - .input-group .scheduler-task-action { - align-self: flex-start; - } -} - -/* Task state selector styling */ -.scheduler-state-selector { - display: flex; - gap: 10px; - flex-wrap: wrap; +.custom-model-input { + width: 100%; + padding: 0.5rem; + border: 1px solid var(--color-secondary); + border-radius: 0.25rem; + font-size: 0.875rem; + background-color: var(--color-background); + color: var(--color-text); } -.scheduler-state-selector .scheduler-status-badge { - cursor: pointer; - transition: all 0.2s ease; +.custom-model-input::placeholder { + color: var(--color-text-muted); opacity: 0.7; - border: 1px solid transparent; -} - -.scheduler-state-selector .scheduler-status-badge:hover { - opacity: 0.9; - transform: scale(1.05); } -.scheduler-status-selected { - opacity: 1 !important; - transform: scale(1.05); - box-shadow: 0 0 0 2px var(--color-bg), 0 0 0 4px var(--color-border); - border: 2px solid var(--color-border) !important; +.custom-model-input:focus { outline: none; + border-color: var(--color-primary); + box-shadow: 0 0 0 2px rgba(var(--color-primary-rgb), 0.2); } - -/* Make status badges in selector more prominent */ -.scheduler-state-selector .scheduler-status-idle, -.scheduler-state-selector .scheduler-status-running, -.scheduler-state-selector .scheduler-status-disabled, -.scheduler-state-selector .scheduler-status-error { - font-weight: 600; - padding: 6px 12px; -} - -.light-mode .scheduler-status-selected { - box-shadow: 0 0 0 2px var(--color-bg-light), 0 0 0 4px var(--color-accent); -} - -/* State explanation styling */ -.scheduler-state-explanation { - margin-top: 10px; - font-size: 0.85rem; - color: var(--color-text-secondary); - line-height: 1.4; - min-height: 1.4em; /* Ensure consistent height even when changing descriptions */ - transition: all 0.2s ease; -} - -.scheduler-state-explanation span { - display: block; - padding: 4px 8px; - background-color: rgba(0, 0, 0, 0.05); - border-radius: 4px; - margin-top: 8px; -} - -.light-mode .scheduler-state-explanation span { - background-color: rgba(255, 255, 255, 0.3); -} - -/* Schedule Builder (for scheduled tasks) */ -.scheduler-schedule-builder { - display: grid; - grid-template-columns: repeat(5, 1fr); - gap: 10px; - width: 
100%; - margin-bottom: 10px; -} - -.scheduler-schedule-field { - display: flex; - flex-direction: column; -} - -.scheduler-schedule-label { - font-size: 0.8rem; - margin-bottom: 5px; - color: var(--color-text); - opacity: 0.8; -} - -/* Plan Builder (for planned tasks) */ -.scheduler-plan-builder { - width: 100%; - margin-bottom: 10px; - border: 1px solid var(--color-border); - border-radius: 8px; - padding: 10px; - background-color: rgba(0, 0, 0, 0.2); -} - -.scheduler-plan-todo { - display: flex; - flex-direction: column; -} - -.scheduler-plan-label { - font-size: 0.9rem; - margin-bottom: 10px; - color: var(--color-text); - font-weight: bold; -} - -.scheduler-todo-list { - display: flex; - flex-direction: column; - gap: 10px; - margin-top: 8px; - max-height: 200px; - overflow-y: auto; -} - -.scheduler-todo-item { - display: flex; - align-items: center; - justify-content: space-between; - background-color: var(--color-background); - border-radius: 6px; - padding: 8px 12px; - border: 1px solid var(--color-border); - box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05); -} - -.scheduler-todo-item span { - flex: 1; - font-size: 14px; -} - -.scheduler-add-todo { - margin-top: 12px; - display: flex; - gap: 8px; - align-items: center; -} - -.scheduler-add-todo input[type="datetime-local"] { - flex: 1; - min-width: 0; - padding: 8px 12px; - border-radius: 6px; - border: 1px solid var(--color-border); - background-color: var(--color-background); - color: var(--color-text); -} - -.scheduler-add-todo-button { - display: flex; - align-items: center; - justify-content: center; - background-color: var(--color-accent); - color: white; - border: none; - border-radius: 6px; - padding: 8px 12px; - cursor: pointer; - transition: background-color 0.2s ease; - font-weight: 500; -} - -.scheduler-add-todo-button:hover { - background-color: var(--color-accent-dark); -} - -.scheduler-todo-remove { - display: flex; - align-items: center; - justify-content: center; - background-color: transparent; - color: var(--color-text); - border: none; - border-radius: 4px; - width: 24px; - height: 24px; - cursor: pointer; - transition: background-color 0.2s ease; - margin-left: 8px; -} - -.scheduler-todo-remove:hover { - background-color: var(--color-accent-light); - color: var(--color-accent-dark); -} - -.light-mode .scheduler-todo-item { - background-color: var(--color-background-light); - border-color: var(--color-border-light); -} - -.light-mode .scheduler-todo-remove:hover { - background-color: #e0e0e0; - color: #d32f2f; -} - -.scheduler-empty-plan { - padding: 12px; - color: var(--color-text-muted); - font-style: italic; - text-align: center; - border: 1px dashed var(--color-border); - border-radius: 6px; - margin-top: 8px; -} - -/* Responsive design for plan builder */ -@media (max-width: 768px) { - .scheduler-add-todo { - flex-direction: column; - } -} - -/* Token field (for ad-hoc tasks) */ diff --git a/webui/css/tables.css b/webui/css/tables.css new file mode 100644 index 0000000000..7d5e75fe86 --- /dev/null +++ b/webui/css/tables.css @@ -0,0 +1,469 @@ +/* ========================================================================== + Tables CSS - Shared styles for data tables and list views + Used by: Memory Dashboard, Task Scheduler, and similar components + ========================================================================== */ + +/* ========================================================================== + FILTER HEADER + Flex-based filter layout for search/filter controls + 
========================================================================== */ +.filters-header { + display: flex; + flex-wrap: wrap; + gap: 0.75rem; + align-items: center; + padding: 1rem; + background: var(--color-panel); + border: 1px solid var(--color-border); + border-radius: 8px; + margin-bottom: 0; +} + +.filter-group-inline { + display: flex; + align-items: center; + gap: 0.5rem; +} + +.filter-group-inline label { + font-size: 0.85rem; + font-weight: 600; + color: var(--color-text); + opacity: 0.8; + white-space: nowrap; +} + +.filter-group-inline select, +.filter-group-inline input { + border: 1px solid var(--color-border); + min-width: 0; +} + +.filter-group-inline select:focus, +.filter-group-inline input:focus { + outline: none; + border-color: var(--color-primary); + background: var(--color-input-focus); +} + +/* Filter threshold (range slider with label) */ +.filter-threshold { + display: flex; + flex-direction: column; + gap: 0.25rem; +} + +.filter-threshold label { + font-size: 0.85rem; + opacity: 0.8; +} + +/* ========================================================================== + STATUS BAR + Status info and pagination controls + ========================================================================== */ +.data-status-bar { + display: flex; + justify-content: space-between; + align-items: center; + padding: 0.75rem 1rem; + background: var(--color-panel); + border: 1px solid var(--color-border); + border-top: none; + font-size: 0.9rem; +} + +.data-status-bar-top { + border-top: 1px solid var(--color-border); + border-bottom: none; + border-radius: 8px 8px 0 0; +} + +.data-status-bar-bottom { + border-top: 1px solid var(--color-border); + border-radius: 0 0 8px 8px; +} + +.data-status-bar-standalone { + border: 1px solid var(--color-border); + border-radius: 8px; +} + +.status-info { + display: flex; + align-items: center; + gap: 0.5rem; + flex-wrap: wrap; +} + +.status-item { + display: flex; + align-items: center; + gap: 0.25rem; +} + +.status-item strong { + color: var(--color-primary); + font-weight: 600; +} + +.status-item .material-symbols-outlined { + font-size: 18px; + opacity: 0.6; +} + +.status-separator { + color: var(--color-border); + margin: 0 0.25rem; +} + +/* ========================================================================== + PAGINATION + Page navigation controls + ========================================================================== */ +.status-pagination { + display: flex; + align-items: center; + gap: 0.5rem; +} + +.page-input-group { + display: flex; + align-items: center; + gap: 0.5rem; +} + +.page-input-group label { + opacity: 0.8; +} + +.page-total { + color: var(--color-text); + opacity: 0.8; + font-size: 0.85rem; + white-space: nowrap; +} + +.page-total strong { + color: var(--color-primary); + font-weight: 600; +} + +.page-input { + width: 60px; + padding: 0.25rem 0.5rem; + text-align: center; + border: 1px solid var(--color-border); + background: var(--color-input); + color: var(--color-text); + border-radius: 4px; + font-size: 0.85rem; + height: 28px; +} + +.page-input:focus { + outline: none; + border-color: var(--color-primary); + background: var(--color-input-focus); +} + +/* Remove spinner arrows from number input */ +.page-input::-webkit-inner-spin-button, +.page-input::-webkit-outer-spin-button { + -webkit-appearance: none; + margin: 0; +} + +.page-input[type=number] { + -moz-appearance: textfield; + appearance: textfield; +} + +/* ========================================================================== + TABLE 
CONTAINER & BASE STYLES + Wrapper and base table styling + ========================================================================== */ +.data-table-container { + border: 1px solid var(--color-border); + border-top: none; + border-radius: 0 0 8px 8px; + overflow: hidden; +} + +.data-table-container-standalone { + border-top: 1px solid var(--color-border); + border-radius: 8px; +} + +.table-wrapper { + overflow-x: auto; +} + +.data-table { + width: 100%; + border-collapse: collapse; + table-layout: auto; + font-size: 0.9rem; +} + +.data-table th, +.data-table td { + padding: 0.75rem; + text-align: left; + border-bottom: 1px solid var(--color-border); + vertical-align: middle; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.data-table th { + background: var(--color-panel); + font-weight: 600; + color: var(--color-text); +} + +.data-table th.sortable { + cursor: pointer; + user-select: none; +} + +.data-table th.sortable:hover { + background: var(--color-bg-tertiary); +} + +.data-table tbody tr { + cursor: pointer; + transition: background-color 0.2s ease; +} + +.data-table tbody tr:hover { + background-color: rgba(255, 255, 255, 0.03); +} + +.light-mode .data-table tbody tr:hover { + background-color: rgba(0, 0, 0, 0.02); +} + +.data-table tbody tr:last-child td { + border-bottom: none; +} + +/* Sticky header for scrollable tables */ +.data-table th.sticky { + position: sticky; + top: 0; + z-index: 10; + border-bottom: 2px solid var(--color-border); +} + +/* ========================================================================== + SORT INDICATOR + Arrow indicator for sortable columns + ========================================================================== */ +.sort-indicator { + display: inline-block; + margin-left: 4px; + transition: transform 0.2s ease; +} + +.sort-indicator.desc { + transform: rotate(180deg); +} + +/* ========================================================================== + EMPTY & LOADING STATES + Empty results and loading indicators + ========================================================================== */ +.data-empty-state { + text-align: center; + padding: 3rem 1rem; + color: var(--color-text); + opacity: 0.8; + background: var(--color-panel); + border: 1px solid var(--color-border); + border-top: none; + border-radius: 0 0 8px 8px; +} + +.data-empty-state-standalone { + border-top: 1px solid var(--color-border); + border-radius: 8px; +} + +.data-empty-state .btn { + display: inline-flex; + align-items: center; + gap: 0.5rem; + margin-top: 1rem; +} + +.loading-state { + text-align: center; + padding: 2rem; + color: var(--color-text); + opacity: 0.8; + background: var(--color-panel); + border: 1px solid var(--color-border); + border-radius: 8px; + margin: 1rem 0; + display: grid; + align-items: center; +} + +.error-state { + text-align: center; + padding: 2rem; + color: var(--color-accent); + border: 1px solid var(--color-accent); + background: var(--color-panel); + border-radius: 8px; + margin: 1rem 0; +} + +.init-message { + text-align: center; + padding: 2rem; + color: var(--color-primary); + border: 1px solid var(--color-primary); + background: var(--color-panel); + border-radius: 8px; + margin: 1rem 0; +} + +.loading-spinner { + width: 24px; + height: 24px; + min-width: 24px; + min-height: 24px; + max-width: 24px; + max-height: 24px; + border: 3px solid var(--color-border); + border-top-color: var(--color-primary); + border-radius: 50%; + animation: spin 1s linear infinite; + margin: 0 auto 0.5rem; + flex-shrink: 0; + 
flex-grow: 0; + display: inline-block; + position: relative; + box-sizing: border-box; +} + +@keyframes spin { + to { + transform: rotate(360deg); + } +} + +/* ========================================================================== + MASS ACTION TOOLBAR + Bulk action controls for selected items + ========================================================================== */ +.mass-action-toolbar { + display: flex; + justify-content: space-between; + align-items: center; + padding: 1rem; + background: var(--color-panel); + margin: 0; + animation: slideDown 0.3s ease; +} + +.selection-info { + font-weight: 600; +} + +.mass-actions { + display: flex; + gap: 0.5rem; +} + +.btn-mass { + align-items: center; + border: 1px solid var(--color-border); + gap: 0.25rem; + display: flex; + background: var(--color-background); + color: var(--color-text); +} + +.btn-mass .material-symbols-outlined { + font-size: 18px; +} + +.btn-mass:hover { + border-color: var(--color-primary); + color: var(--color-primary); + background: var(--color-panel); +} + +.btn-mass.delete:hover { + border-color: var(--color-accent); + color: var(--color-accent); +} + +@keyframes slideDown { + from { + opacity: 0; + transform: translateY(-10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +/* ========================================================================== + RESPONSIVE STYLES + Mobile breakpoints for tables and status bars + ========================================================================== */ +@media (max-width: 870px) { + .filters-header { + flex-direction: column; + align-items: stretch; + } + + .filter-group-inline { + width: 100%; + } + + .filter-group-inline select, + .filter-group-inline input { + flex: 1; + } + + .data-status-bar { + flex-direction: column; + gap: 0.75rem; + } + + .status-info { + width: 100%; + justify-content: center; + } + + .status-pagination { + width: 100%; + justify-content: center; + padding-top: 0.5rem; + border-top: 1px solid var(--color-border); + } + + .data-table th, + .data-table td { + padding: 0.5rem 0.25rem; + font-size: 0.85rem; + } + + .mass-action-toolbar { + flex-direction: column; + gap: 0.75rem; + } + + .mass-actions { + flex-wrap: wrap; + justify-content: center; + } +} + diff --git a/webui/index.css b/webui/index.css index 07e6fe74f6..c0e29d5c5a 100644 --- a/webui/index.css +++ b/webui/index.css @@ -10,42 +10,52 @@ :root { /* Dark mode */ --color-background-dark: #131313; - --color-text-dark: #d4d4d4; + --color-text-dark: #ffffff; + --color-text-muted-dark: #d4d4d4e4; --color-primary-dark: #737a81; --color-secondary-dark: #656565; --color-accent-dark: #cf6679; --color-message-bg-dark: #2d2d2d; + --color-highlight-dark: #2b5ab9; --color-message-text-dark: #e0e0e0; --color-panel-dark: #1a1a1a; --color-border-dark: #444444a8; --color-input-dark: #131313; --color-input-focus-dark: #101010; + --color-chat-background-dark: #212121; /* Light mode */ - --color-background-light: #dbdbdb; + --color-background-light: #fafafa; --color-text-light: #333333; + --color-text-muted-light: #333333e4; --color-primary-light: #384653; --color-secondary-light: #e8eaf6; --color-accent-light: #b00020; --color-message-bg-light: #ffffff; + --color-highlight-light: #2563eb; --color-message-text-light: #333333; --color-panel-light: #f0f0f0; - --color-border-light: #e0e0e0c7; + --color-border-light: #bdbdbdcf; --color-input-light: #e4e4e4; --color-input-focus-light: #dadada; + --color-chat-background-light: #fafafaf3; /* Default to dark mode */ --color-background: 
var(--color-background-dark); --color-text: var(--color-text-dark); + --color-text-muted: var(--color-text-muted-dark); --color-primary: var(--color-primary-dark); --color-secondary: var(--color-secondary-dark); --color-accent: var(--color-accent-dark); --color-message-bg: var(--color-message-bg-dark); + --color-highlight: var(--color-highlight-dark); --color-message-text: var(--color-message-text-dark); --color-panel: var(--color-panel-dark); --color-border: var(--color-border-dark); --color-input: var(--color-input-dark); --color-input-focus: var(--color-input-focus-dark); + --color-background-hover: color-mix(in srgb, var(--color-border) 50%, transparent); + --color-chat-background: var(--color-chat-background-dark); /* Spacing variables */ --spacing-xxs: 0.15rem; @@ -76,6 +86,7 @@ .light-mode { --color-background: var(--color-background-light); --color-text: var(--color-text-light); + --color-text-muted: var(--color-text-muted-light); --color-primary: var(--color-primary-light); --color-secondary: var(--color-secondary-light); --color-accent: var(--color-accent-light); @@ -85,7 +96,10 @@ --color-border: var(--color-border-light); --color-input: var(--color-input-light); --color-input-focus: var(--color-input-focus-light); -} + --color-background-hover: color-mix(in srgb, var(--color-border) 50%, transparent); + --color-chat-background: var(--color-chat-background-light); + } + /* Reset and Base Styles */ body, @@ -115,294 +129,251 @@ p:last-child { } body, -#left-panel, #chat-input, .message, .config-button, .switch-label { -webkit-transition: background-color 0.3s, color 0.3s, border-color 0.3s; transition: background-color 0.3s, color 0.3s, border-color 0.3s; - color: var(--color-text); + color: var(--color-text) !important; } -img { - user-drag: none; /* Safari & old Chrome */ - -webkit-user-drag: none; - user-select: none; /* Prevent selection for good measure */ - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - pointer-events: auto; /* Allow clicks, just no drag */ +/* Bootstrap-like tooltips */ +.tooltip { + position: absolute; + /* Above custom modals (base z-index: 3000 in `webui/js/modals.js`) */ + z-index: 5000; + display: block; + margin: 0; + font-family: var(--font-family-main); + font-style: normal; + font-weight: 400; + line-height: 1.2; + text-align: left; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + white-space: normal; + opacity: 0; + pointer-events: none; + transition: opacity 0.08s ease-in-out; } -/* Layout */ -.container { - display: -webkit-flex; - display: flex; - height: 100%; +.tooltip.show { + opacity: 1; } -.panel { - display: -webkit-flex; - display: flex; - height: 100%; - overflow: auto; - -webkit-scroll-behavior: smooth; - scroll-behavior: smooth; +.tooltip .tooltip-inner { + max-width: 240px; + padding: 0.35rem 0.6rem; + color: var(--color-text); + background-color: var(--color-secondary); + border-radius: 0.375rem; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.25); + font-size: 0.75rem; } -/* Left Panel */ -#left-panel { - background-color: var(--color-panel); - border-right: 1px solid var(--color-border); - box-sizing: border-box; - display: -webkit-flex; - display: flex; - flex-direction: column; - justify-content: space-between; - -webkit-transition: all var(--transition-speed) ease-in-out; - transition: all var(--transition-speed) ease-in-out; - width: 250px; - min-width: 250px; - color: var(--color-text); - box-shadow: 1px 0 5px rgba(0, 0, 0, 0.3); 
- user-select: none; +.tooltip .tooltip-arrow { + position: absolute; + width: 0.8rem; + height: 0.4rem; } -#left-panel.hidden { - margin-left: -250px; +.tooltip .tooltip-arrow::before { + content: ""; + position: absolute; + border-color: transparent; + border-style: solid; } -.left-panel-top { - flex: 1; - display: -webkit-flex; - display: flex; - flex-direction: column; - min-height: 0; - overflow: hidden; - margin-top: 3.5rem; - padding: var(--spacing-md) var(--spacing-md) 0 var(--spacing-md); +.bs-tooltip-top .tooltip-arrow { + bottom: 0; + left: 50%; + transform: translateX(-50%); } -.left-panel-top::-webkit-scrollbar { - width: 0px; +.bs-tooltip-top .tooltip-arrow::before { + border-width: 0.4rem 0.4rem 0; + border-top-color: var(--color-secondary); + top: 0; } -.left-panel-top { - scrollbar-width: none; - -ms-overflow-style: none; +.bs-tooltip-bottom .tooltip-arrow { + top: 0; + left: 50%; + transform: translateX(-50%); } -#status-section, -.config-section:not(#chats-section) { - flex-shrink: 0; +.bs-tooltip-bottom .tooltip-arrow::before { + border-width: 0 0.4rem 0.4rem; + border-bottom-color: var(--color-secondary); + top: 0; } -.left-panel-bottom { - position: relative; - flex-shrink: 0; +.bs-tooltip-start .tooltip-arrow, +.bs-tooltip-left .tooltip-arrow { + right: 0; + top: 50%; + width: 0.4rem; + height: 0.8rem; + transform: translateY(-50%); } -/* Sidebar Toggle Button */ -.toggle-sidebar-button { - height: 2.6rem; - width: 2.6rem; - background-color: var(--color-background); - border: 0.1rem solid var(--color-border); - border-radius: var(--spacing-xs); - color: var(--color-text); - opacity: 0.8; - cursor: pointer; - left: var(--spacing-md); - padding: 0.47rem 0.56rem; - position: absolute; - top: var(--spacing-md); - z-index: 999; - -webkit-transition: all var(--transition-speed) ease-in-out; - transition: all var(--transition-speed) ease-in-out; +.bs-tooltip-start .tooltip-arrow::before, +.bs-tooltip-left .tooltip-arrow::before { + border-width: 0.4rem 0 0.4rem 0.4rem; + border-left-color: var(--color-secondary); + left: 0; } -.toggle-sidebar-button:hover { - background-color: var(--color-secondary); - opacity: 1; +.bs-tooltip-end .tooltip-arrow, +.bs-tooltip-right .tooltip-arrow { + left: 0; + top: 50%; + width: 0.4rem; + height: 0.8rem; + transform: translateY(-50%); } -.toggle-sidebar-button:active { - opacity: 0.5; +.bs-tooltip-end .tooltip-arrow::before, +.bs-tooltip-right .tooltip-arrow::before { + border-width: 0.4rem 0.4rem 0.4rem 0; + border-right-color: var(--color-secondary); + right: 0; } -#sidebar-hamburger-svg { - -webkit-transition: all var(--transition-speed) ease; - transition: all var(--transition-speed) ease; +/* Bootstrap-like tooltips */ +.tooltip { + position: absolute; + /* Above custom modals (base z-index: 3000 in `webui/js/modals.js`) */ + z-index: 5000; + display: block; + margin: 0; + font-family: var(--font-family-main); + font-style: normal; + font-weight: 400; + line-height: 1.2; + text-align: left; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + white-space: normal; + opacity: 0; + pointer-events: none; + transition: opacity 0.08s ease-in-out; } -.toggle-sidebar-button:active #sidebar-hamburger-svg { - -webkit-transform: scaleY(0.8); - transform: scaleY(0.8); +.tooltip.show { + opacity: 1; } -.switch-label { - margin-right: 0.5rem; +.tooltip .tooltip-inner { + max-width: 240px; + padding: 0.35rem 0.6rem; + color: var(--color-text); + background-color: 
var(--color-secondary); + border-radius: 0.375rem; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.25); + font-size: 0.75rem; } -/* Chats container */ -.chat-container { - display: flex; - align-items: center; - position: relative; - width: 100%; - min-height: 30px; - gap: 0; +.tooltip .tooltip-arrow { + position: absolute; + width: 0.8rem; + height: 0.4rem; } -/* Update the chat-list-button padding to accommodate the vertical layout */ -.chat-list-button { - display: block; - width: 100%; - padding: 8px 5px; - cursor: pointer; - overflow: hidden; - position: relative; - border-radius: 4px; - transition: background-color 0.2s ease-in-out; +.tooltip .tooltip-arrow::before { + content: ""; + position: absolute; + border-color: transparent; + border-style: solid; } -/* Add some more padding to the list items to accommodate the vertical layout */ -.chat-list-button.has-task-container { - padding-top: 6px; - padding-bottom: 6px; +.bs-tooltip-top .tooltip-arrow { + bottom: 0; + left: 50%; + transform: translateX(-50%); } -/* Subtle background on hover for the entire row */ -.chat-list-button:hover { - background-color: rgba(255, 255, 255, 0.03); +.bs-tooltip-top .tooltip-arrow::before { + border-width: 0.4rem 0.4rem 0; + border-top-color: var(--color-secondary); + top: 0; } -.light-mode .chat-list-button:hover { - background-color: rgba(0, 0, 0, 0.02); +.bs-tooltip-bottom .tooltip-arrow { + top: 0; + left: 50%; + transform: translateX(-50%); } -.chat-name { - display: inline-block; - max-width: 160px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; - cursor: pointer; - padding: 3px 8px; - border-radius: 4px; - transition: background-color 0.2s; - margin-right: 60px; /* Make space for buttons */ - font-size: var(--font-size-small); /* Match config button font size */ +.bs-tooltip-bottom .tooltip-arrow::before { + border-width: 0 0.4rem 0.4rem; + border-bottom-color: var(--color-secondary); + top: 0; } -/* Add a nice hover effect to just the chat name */ -.chat-name:hover { - background-color: rgba(255, 255, 255, 0.1); - text-decoration: none; +.bs-tooltip-start .tooltip-arrow, +.bs-tooltip-left .tooltip-arrow { + right: 0; + top: 50%; + width: 0.4rem; + height: 0.8rem; + transform: translateY(-50%); } -.light-mode .chat-name:hover { - background-color: rgba(0, 0, 0, 0.05); +.bs-tooltip-start .tooltip-arrow::before, +.bs-tooltip-left .tooltip-arrow::before { + border-width: 0.4rem 0 0.4rem 0.4rem; + border-left-color: var(--color-secondary); + left: 0; } -.chats-list-container { - flex: 1; - min-height: 0; - overflow-y: auto; - scroll-behavior: smooth; - /* Mask */ - mask-image: linear-gradient( - to bottom, - black calc(100% - 20px), - transparent 100% - ); - -webkit-mask-image: linear-gradient( - to bottom, - black calc(100% - 20px), - transparent 100% - ); - /* Fallback for browsers that do not support mask-image */ - background: linear-gradient(to bottom, calc(100% - 20px), transparent 100%); - /* Add padding to account for fade */ - padding-bottom: 20px; - scrollbar-width: thin; - -ms-overflow-style: auto; -} - -.chats-list-container::-webkit-scrollbar { - width: 5px; -} - -.chats-list-container::-webkit-scrollbar-track { - background: rgba(0, 0, 0, 0.2); - border-radius: 6px; +.bs-tooltip-end .tooltip-arrow, +.bs-tooltip-right .tooltip-arrow { + left: 0; + top: 50%; + width: 0.4rem; + height: 0.8rem; + transform: translateY(-50%); } -.chats-list-container::-webkit-scrollbar-thumb { - background-color: var(--color-border); - border-radius: 6px; +.bs-tooltip-end 
.tooltip-arrow::before, +.bs-tooltip-right .tooltip-arrow::before { + border-width: 0.4rem 0.4rem 0.4rem 0; + border-right-color: var(--color-secondary); + right: 0; } -.chats-list-container::-webkit-scrollbar-thumb:hover { - background-color: var(--color-border); +img { + user-drag: none; /* Safari & old Chrome */ + -webkit-user-drag: none; + user-select: none; /* Prevent selection for good measure */ + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + pointer-events: auto; /* Allow clicks, just no drag */ } -/* Chats Section */ -#chats-section { +/* Layout */ +.container { display: -webkit-flex; display: flex; - flex-direction: column; - min-height: 0; - flex: 1; - margin-top: 0.5rem; + height: 100%; } -/* Preferences */ -.pref-header { +.panel { display: -webkit-flex; display: flex; - justify-content: space-between; - align-items: center; - cursor: pointer; - user-select: none; - font-size: var(--font-size-normal); - margin: 0.28rem 0 0.6rem 0; -} - -/* Arrow icon */ -.arrow-icon { - flex-shrink: 0; - -webkit-transition: transform var(--transition-speed) ease-in-out; - transition: transform var(--transition-speed) ease-in-out; - margin-left: 0.5rem; - width: 16px; - height: 16px; - transform: rotate(90deg); -} - -.arrow-icon.rotated { - -webkit-transform: rotate(-90deg); - transform: rotate(-90deg); -} - -.pref-section { - font-size: var(--font-size-small); - padding: 0.6rem var(--spacing-md) 0.05rem var(--spacing-md); -} - -/* Collapse transition */ -.pref-section [x-cloak] { - display: none; -} - -/* Version */ -.version-info { - line-height: 0.8rem; - position: relative; - margin: 0 var(--spacing-md) 1rem var(--spacing-md); - padding-top: 10px; - border-top: 1px solid var(--color-border); + height: 100%; + overflow: auto; + -webkit-scroll-behavior: smooth; + scroll-behavior: smooth; } /* Right Panel */ @@ -416,9 +387,23 @@ img { transition: margin-left var(--transition-speed) ease-in-out; } +/* Scrollbar styles for right panel */ #right-panel.expanded { margin-left: 0; } +div#right-panel::-webkit-scrollbar { + width: 6px; +} +div#right-panel::-webkit-scrollbar-track { + background: transparent; +} +div#right-panel::-webkit-scrollbar-thumb { + background-color: rgba(155, 155, 155, 0.3); + border-radius: 6px; +} +div#right-panel::-webkit-scrollbar-thumb:hover { + background-color: rgba(155, 155, 155, 0.5); +} #time-date-container { z-index: 1000; @@ -432,14 +417,16 @@ img { #time-date { color: var(--color-text); - font-size: var(--font-size-normal); text-align: right; - line-height: 1.1; + line-height: 1.2; } #user-date { - font-size: var(--font-size-small); - opacity: 0.6; + font-size: 0.7rem; + opacity: 0.5; + font-weight: 400; + display: block; + margin-top: 2px; } /* Typography */ @@ -461,110 +448,6 @@ h4 { margin: auto 0; } */ -#a0version { - color: var(--color-text); - opacity: 0.7; - font-size: 0.7rem; - user-select: all; -} - - -/* Logo Container */ -#logo-container { - display: -webkit-flex; - display: flex; - align-items: center; - justify-content: space-between; - position: fixed; - margin-left: 4.6rem; - margin-top: var(--spacing-md); - z-index: 999; - -webkit-transition: margin-left var(--transition-speed) ease-in-out; - transition: margin-left var(--transition-speed) ease-in-out; -} - -#logo-container a { - color: inherit; - text-decoration: none; -} - -#logo-container img { - border-radius: var(--spacing-xs); - width: auto; - height: 2.6rem; - -webkit-transition: filter 0.3s ease; - transition: filter 0.3s ease; -} - -#progress-bar-box { - 
background-color: var(--color-panel); - padding: var(--spacing-sm) var(--spacing-md); - padding-bottom: 0; - display: flex; - justify-content: space-between; - z-index: 1001; -} - -#progress-bar-box h4 { - margin: 0; -} - -#progress-bar-h { - color: var(--color-primary); - display: flex; - align-items: left; - justify-content: flex-start; - height: 1.2em; - text-overflow: ellipsis; - white-space: nowrap; /* Added for text overflow */ - overflow: hidden; - font-weight: normal; -} - -#progress-bar-i { - font-weight: bold; - padding-right: 0.5em; - color: var(--color-secondary); -} - -.progress-bar h4 { - margin-left: 1em; - margin-right: 1.2em; -} - -.shiny-text { - background: linear-gradient( - to right, - var(--color-primary-dark) 20%, - var(--color-text) 40%, - var(--color-text) 60%, - var(--color-primary-dark) 60% - ); - background-size: 200% auto; - color: transparent; - -webkit-background-clip: text; - background-clip: text; - animation: shine 1s linear infinite; -} - -#right-panel.expanded #logo-container { - margin-left: 4.6rem; -} - - - -/* Input Section */ -#input-section { - position: relative; - background-color: var(--color-panel); - display: -webkit-flex; - display: flex; - flex-direction: column; - gap: var(--spacing-xs); - padding: 0.8rem var(--spacing-md) var(--spacing-sm) var(--spacing-sm); - align-items: start; - flex-shrink: 0; -} /* Preview section with unified tile system */ .preview-section { @@ -721,104 +604,9 @@ h4 { content: "⚠️"; } -/* Text input */ -#chat-input-container { - position: relative; - width: 100%; -} - -#chat-input { - background-color: var(--color-input); - border: 1px solid var(--color-border); - border-radius: 8px; - color: var(--color-text); - flex-grow: 1; - font-family: "Roboto Mono", monospace; - font-optical-sizing: auto; - -webkit-font-optical-sizing: auto; - font-size: 0.9rem; - max-height: 7rem; - min-height: 3.05rem; - width: 100%; - padding: 0.48rem 40px var(--spacing-sm) var(--spacing-sm); - margin-right: var(--spacing-xs); - overflow-y: auto; - scroll-behavior: smooth; - resize: none; - align-content: start; - background-clip: border-box; - border: 6px solid transparent; - transition: all 0.3s ease; - -webkit-transition: all 0.3s ease; -} - -#chat-input { - outline: 1px solid var(--color-border); -} - -#expand-button { - position: absolute; - top: 12px; - right: 10px; - background: transparent; - border: none; - cursor: pointer; - font-size: 1.2rem; - color: var(--color-text); - opacity: 0.4; - transition: opacity 0.2s; -} - -#expand-button:hover { - opacity: 0.7; -} - -#expand-button:active { - opacity: 1; -} - -#expand-button svg { - width: 1.3rem; - height: 1.3rem; -} - -#chat-input::-webkit-scrollbar { - width: 6px; - height: 6px; -} - -#chat-input::-webkit-scrollbar-track { - background: transparent; - margin: 4px 0; - border-radius: 6px; -} - -#chat-input::-webkit-scrollbar-thumb { - background-color: rgba(155, 155, 155, 0.5); - border-radius: 6px; - -webkit-transition: background-color 0.2s ease; - transition: background-color 0.2s ease; -} - -#chat-input::-webkit-scrollbar-thumb:hover { - background-color: rgba(155, 155, 155, 0.7); -} - -#chat-input:focus { - outline: 0.05rem solid rgba(155, 155, 155, 0.5); - font-size: 0.955rem; - padding-top: 0.45rem; - background-color: var(--color-input-focus); -} - -#chat-input::placeholder { - color: var(--color-text-muted); - opacity: 0.7; -} - -/* Config Section */ -.config-section > h4 { - margin-top: 0; +/* Config Section */ +.config-section > h4 { + margin-top: 0; } .config-list { @@ 
-829,93 +617,17 @@ h4 { .config-list li { align-items: center; - border-top: 1px solid var(--color-border); + /* border-top: 1px solid var(--color-border); */ display: -webkit-flex; display: flex; justify-content: space-between; - padding: 0.35rem 0; + padding: 0.28rem 0; } .config-list > *:first-child { border-top: 0px; } -#pref-list li { - opacity: 0.8; -} - -.config-button { - background-color: var(--color-background); - border: 0.1rem solid var(--color-border); - border-radius: var(--spacing-xs); - cursor: pointer; - display: inline; - font-family: "Rubik", Arial, Helvetica, sans-serif; - font-size: var(--font-size-small); - opacity: 0.8; - text-wrap: nowrap; - width: calc(50% - var(--spacing-xs)); - float: left; - margin: 0 var(--spacing-xs) var(--spacing-xs) 0; - padding: var(--spacing-sm) 0.75rem; - max-height: 2.3rem; - -webkit-transition: all var(--transition-speed), transform 0.1s ease-in-out; - transition: all var(--transition-speed), transform 0.1s ease-in-out; -} - -.config-button:hover { - background-color: var(--color-secondary); - opacity: 1; -} - -.config-button:active { - opacity: 0.5; -} - -#settings { - display: flex; - align-items: center; -} - -.edit-button { - background-color: transparent; - border: 1px solid var(--color-border); - border-radius: 0.1875rem; - color: var(--color-primary); - cursor: pointer; - padding: 0.125rem 0.5rem; - -webkit-transition: all var(--transition-speed) ease-in-out; - transition: all var(--transition-speed) ease-in-out; - width: 2rem; - height: 2rem; -} - -.edit-button:hover { - border-color: var(--color-primary); - background-color: #32455690; -} - -.edit-button:active { - background-color: #131a2090; - color: rgba(253, 253, 253, 0.35); -} - -/* Input section layout */ -#input-section { - display: flex; - flex-direction: column; - gap: var(--spacing-xs); - padding: var(--spacing-sm) var(--spacing-md) var(--spacing-sm) 0.8rem; - background-color: var(--color-panel); - z-index: 1001; -} - -/* Top row styling */ -.input-row { - display: flex; - align-items: center; - gap: var(--spacing-xs); -} /* Attachment icon */ .attachment-wrapper { position: relative; @@ -942,15 +654,14 @@ h4 { /* Message attachments with unified tile system */ .attachments-container { margin-top: 0.75em; - display: grid; - grid-template-columns: repeat(auto-fit, minmax(120px, 1fr)); - gap: 12px; + display: flex; + flex-wrap: wrap; + gap: 8px; + justify-content: flex-end; padding: var(--spacing-sm); /* background-color: var(--color-input); */ border-radius: 8px; - max-width: 600px; /* Limits to ~5 columns at 120px each */ overflow: visible; - justify-items: end; } .attachment-item { @@ -1160,127 +871,6 @@ h4 { content: "⚠️"; } -/* Text input */ -.input-row { - width: 100%; - white-space: nowrap; -} - -/* with text buttons */ -.text-buttons-row { - width: 100%; - display: flex; - padding-top: var(--spacing-xs); - margin-left: var(--spacing-xs); -} - -.text-button { - background-color: transparent; - border: none; - border-radius: 5px; - color: var(--color-text); - font-family: "Rubik", Arial, Helvetica, sans-serif; - font-size: 0.6rem; - padding: 6px var(--spacing-sm); - cursor: pointer; - opacity: 0.8; - -webkit-transition: all 0.3s; - transition: all 0.3s; - display: flex; - align-items: center; - gap: var(--spacing-xs); /* space between icon and text */ -} - -.text-button:hover { - opacity: 1; - background-color: var(--color-secondary); - border-radius: 4px; -} - -.text-button:active { - opacity: 0.5; -} - -.text-button svg { - width: 14px; - height: 14px; - 
flex-shrink: 0; /* prevents SVG from shrinking */ -} - -.text-button p { - margin-block: 0; -} - -/* Chat buttons (Send and Mic) */ - -#chat-buttons-wrapper { - gap: var(--spacing-xs); - padding-left: var(--spacing-xs); -} - -.chat-button { - border: none; - border-radius: 50%; - color: var(--color-background); - cursor: pointer; - font-size: var(--font-size-normal); - height: 2.525rem; - width: 2.525rem; - margin: 0 0.18rem 0 0 var(--spacing-xs); - display: -webkit-flex; - display: flex; - align-items: center; - justify-content: center; - flex-shrink: 0; - flex-grow: 0; - min-width: 2.525rem; - -webkit-transition: all var(--transition-speed), transform 0.1s ease-in-out; - transition: all var(--transition-speed), transform 0.1s ease-in-out; -} - -#send-button { - background-color: #4248f1; -} - -#send-button:hover { - -webkit-transform: scale(1.05); - transform: scale(1.05); - transform-origin: center; - background-color: #353bc5; -} - -#send-button:active { - -webkit-transform: scale(1); - transform: scale(1); - transform-origin: center; - background-color: #2b309c; -} - -.chat-button svg { - width: 1.5rem; - height: 1.5rem; -} - -/* Microphone button */ -.chat-button.mic-inactive svg { - /* Add specific styles if needed */ -} - -/* Tooltip */ -.tooltip { - position: absolute; - bottom: 100%; - left: 50%; - transform: translateX(0%); - padding: 8px; - background-color: var(--color-secondary); - color: var(--color-text); - border-radius: 4px; - font-size: 12px; - white-space: nowrap; - z-index: 1002; -} - /* Image preview section */ .image-preview-section { display: flex; @@ -1381,21 +971,6 @@ input:checked + .slider:before { transform: translateX(1rem); } -#chat-buttons-wrapper { - line-height: 0.5rem; - display: -webkit-flex; - display: flex; -} - -/* Tooltip */ -.tooltip { - /* Already defined above */ -} - - - - - .status-icon { display: flex; justify-content: center; @@ -1431,24 +1006,8 @@ input:checked + .slider:before { } - - /* Media Queries */ @media (max-width: 640px) { - .text-buttons-row { - display: table; - gap: 0.1rem !important; - } - - .text-button { - max-height: 25px; - } - - .text-button p { - display: none; - } - - /* Responsive tiles for mobile */ .preview-section { grid-template-columns: repeat(auto-fill, minmax(90px, 1fr)); @@ -1457,10 +1016,8 @@ input:checked + .slider:before { } .attachments-container { - grid-template-columns: repeat(auto-fit, minmax(90px, 1fr)); - gap: 8px; + gap: 6px; padding: var(--spacing-xs); - max-width: 300px; /* Limits to ~3 columns at 90px each */ } .preview-item, @@ -1515,21 +1072,6 @@ input:checked + .slider:before { } @media (max-width: 640px) { - #chat-input { - min-height: 5.3rem; - align-content: start; - } - - #chat-buttons-wrapper { - display: flex; - gap: var(--spacing-xs); - padding: 0; - width: 3.5rem; - flex-wrap: wrap; - -webkit-transition: all 0.3s ease; - transition: all 0.3s ease; - } - .sidebar-overlay { display: none; position: fixed; @@ -1548,60 +1090,6 @@ input:checked + .slider:before { } @media (max-width: 768px) { - #left-panel { - position: fixed; - left: 0; - top: 0; - bottom: 0; - width: 250px !important; /* Force width */ - min-width: 250px; - z-index: 1003; - -webkit-transition: all var(--transition-speed) ease-in-out; - transition: all var(--transition-speed) ease-in-out; - } - - #left-panel.hidden { - margin-left: -250px; - } - - .toggle-sidebar-button { - position: fixed; - left: var(--spacing-md); - z-index: 1004; - } - - #logo-container { - margin-left: 4.6rem; - -webkit-transition: all 0.3s ease; - 
transition: all 0.3s ease; - } - - #right-panel.expanded #logo-container { - margin-left: 4.6rem; - } - - #input-section { - align-items: start; - } - - .text-buttons-row { - width: 90%; - display: flex; - padding-top: var(--spacing-xs); - gap: var(--spacing-xs); - white-space: pre-wrap; - } - - .text-button { - font-size: 0.6rem; - } - - .text-button svg { - width: 18px; - height: 18px; - flex-shrink: 0; /* prevents SVG from shrinking */ - } - /* .copy-button { display: none !important; } */ @@ -1641,14 +1129,6 @@ input:checked + .slider:before { } */ } -@media (min-width: 768px) { - #chat-buttons-wrapper { - flex-wrap: nowrap; - -webkit-flex-wrap: nowrap; - flex-wrap: nowrap; - } -} - @media (max-height: 600px) { /* consistent font sizing */ html { @@ -1662,16 +1142,6 @@ input:checked + .slider:before { -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } - - #chats-section { - min-height: 100%; - } - - .left-panel-top { - overflow-y: auto; - -webkit-scroll-behavior: smooth; - scroll-behavior: smooth; - } } @media screen and (orientation: landscape) { @@ -1709,42 +1179,6 @@ a:active { color: #f44336; } -.light-mode #left-panel { - box-shadow: 1px 0 25px rgba(0, 0, 0, 0.05); -} - -.light-mode .config-button { - background-color: var(--color-background); - color: #333333; -} - -.light-mode .config-button:hover { - background-color: #d6dae8; -} - -.light-mode .config-button:active { - background-color: #bdc0cb; -} - -.light-mode .edit-button { - border-color: var(--color-primary-light); - color: var(--color-primary-light); -} - -.light-mode .edit-button:hover { - background-color: #e4e7f0; -} - -.light-mode .edit-button:active { - background-color: #979fb9; - color: rgba(0, 0, 0, 0.35); -} - -.light-mode #progress-bar-i { - color: var(--color-border-dark); - opacity: 0.5; -} - .light-mode .slider { background-color: #f1f1f1; border: 1px solid #dddddd; @@ -1758,18 +1192,12 @@ a:active { background-color: #e6e6e6; } -.light-mode #logo-container img { - -webkit-filter: invert(100%) grayscale(100%); - filter: invert(100%) grayscale(100%); -} - .light-mode .extension { background: var(--color-primary); color: var(--color-background); opacity: 0.7; } - .dragdrop-overlay { position: fixed; top: 0; @@ -1852,269 +1280,20 @@ a:active { } } -/* Add to the existing .chat-actions class or create it */ -.chat-actions { +/* Settings Modal Styles */ +.settings-modal { + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + z-index: 1000; + background-color: rgba(0, 0, 0, 0.75); display: flex; - gap: 5px; - position: absolute; - right: 5px; - top: 50%; - transform: translateY(-50%); - z-index: 2; /* Ensure buttons are above the edit field */ - min-width: 70px; /* Ensure minimum width for the buttons */ - justify-content: flex-end; -} - -/* Tasks list container - similar to chats list */ -.tasks-list-container { - max-height: 300px; - overflow-y: auto; - margin-top: 10px; - padding-right: 5px; - border-radius: 5px; - position: relative; - scrollbar-width: thin; - -ms-overflow-style: auto; -} - -.tasks-list-container::-webkit-scrollbar { - width: 5px; -} - -.tasks-list-container::-webkit-scrollbar-track { - background: rgba(0, 0, 0, 0.2); - border-radius: 6px; -} - -.tasks-list-container::-webkit-scrollbar-thumb { - background-color: var(--color-border); - border-radius: 6px; -} - -.tasks-list-container::-webkit-scrollbar-thumb:hover { - background-color: var(--color-border); -} - -.task-name { - display: block; - width: 100%; - overflow: hidden; - text-overflow: ellipsis; - 
white-space: nowrap; - padding: 3px 0; - margin-left: 10px; - cursor: pointer; - border-radius: 4px; - transition: background-color 0.2s; - font-size: var(--font-size-small); - margin-bottom: 2px; -} - -.task-info-line { - display: flex; - justify-content: space-between; - align-items: center; - width: 100%; - margin-top: 2px; - margin-left: 5px; -} - -.task-name:hover { - background-color: rgba(255, 255, 255, 0.1); - text-decoration: none; -} - -.light-mode .task-name:hover { - background-color: rgba(0, 0, 0, 0.05); -} - -/* Dark mode overrides */ -.light-mode .tab.active { - color: var(--highlight-pink); -} - -.light-mode .tab.active::after { - background-color: var(--highlight-pink); - /* box-shadow: 0 0 8px var(--highlight-pink); */ -} -/* Tabs styling */ -.tabs-container { - width: 100%; - margin-bottom: 8px; /* Reduced spacing between tabs and list */ - padding: 0; - margin-top: 20px; /* Increased spacing from elements above */ -} - -.tabs { - display: flex; - width: 100%; - position: relative; - gap: 5px; - border-bottom: 3px solid var(--color-border); /* Thicker bottom line */ - justify-content: center; /* Center the tabs */ -} - -.tab { - padding: 8px 16px; - cursor: pointer; - position: relative; - color: var(--color-text); - border: 2px solid var(--color-border); - border-bottom: none; - border-radius: 8px 8px 0 0; - transition: all 0.3s ease; - background-color: var(--color-panel); - margin-bottom: -3px; /* Match the thicker border */ - z-index: 1; -} - -.tab:not(.active) { - opacity: 0.8; - border-bottom: 3px solid var(--color-border); - background-color: rgba(255, 255, 255, 0.03); -} - -.tab.active { - border-color: var(--color-border); - /* box-shadow: - 0 -4px 8px -2px var(--color-border), - 4px 0 8px -2px var(--color-border), - -4px 0 8px -2px var(--color-border); */ - font-weight: bold; - background-color: var(--color-panel); -} - -.light-mode .tab.active { - box-shadow: 0 -4px 8px -2px var(--color-border), - 4px 0 8px -2px var(--color-border), -4px 0 8px -2px var(--color-border); -} - -.light-mode .tab:not(.active) { - background-color: rgba(0, 0, 0, 0.03); -} - -/* Remove previous tab styling that conflicts */ -.tab.active::after { - display: none; -} - -/* Empty list message styling enhancement */ -.empty-list-message { - display: flex; - justify-content: center; - align-items: center; - height: 100px; - color: var(--color-secondary); - text-align: center; - opacity: 0.7; - font-style: italic; -} - -.light-mode .empty-list-message { - color: var(--color-secondary-light); -} - -/* Common scrollbar styling */ -::-webkit-scrollbar { - width: 5px; - height: 5px; -} - -::-webkit-scrollbar-track { - background: rgba(0, 0, 0, 0.2); - border-radius: 6px; -} - -::-webkit-scrollbar-thumb { - background-color: var(--color-border); - border-radius: 6px; - transition: background-color 0.2s ease; -} - -::-webkit-scrollbar-thumb:hover { - background-color: var(--color-border); -} - -::-webkit-scrollbar-thumb:active { - background-color: var(--color-border); -} - -/* Firefox scrollbar */ -* { - scrollbar-width: thin; - scrollbar-color: var(--color-border) rgba(0, 0, 0, 0.2); -} - -/* Light mode scrollbar */ -.light-mode ::-webkit-scrollbar-track { - background: rgba(200, 200, 200, 0.3); -} - -.light-mode ::-webkit-scrollbar-thumb { - background-color: var(--color-border); -} - -.light-mode ::-webkit-scrollbar-thumb:hover { - background-color: var(--color-border); -} - -.light-mode ::-webkit-scrollbar-thumb:active { - background-color: var(--color-border); -} - -.light-mode * { - 
scrollbar-color: var(--color-border) rgba(200, 200, 200, 0.3); -} - -/* Add specific styling for selected chat items */ -.chat-list-button.font-bold { - position: relative; - background-color: var(--color-border) 0.05; -} - -.chat-list-button.font-bold::before { - content: ""; - position: absolute; - left: 0; - top: 0; - height: 100%; - width: 3px; - background-color: var(--color-border); - border-top-left-radius: 3px; - border-bottom-left-radius: 3px; -} - -.light-mode .chat-list-button.font-bold { - background-color: var(--color-border) 0.05; -} - -.light-mode .chat-list-button.font-bold::before { - background-color: var(--color-border); -} - -/* Make sure the chat container has proper spacing */ -.chat-container, -.task-container { - display: flex; - align-items: center; - width: 100%; - justify-content: space-between; -} - -/* Settings Modal Styles */ -.settings-modal { - position: fixed; - top: 0; - left: 0; - right: 0; - bottom: 0; - z-index: 1000; - background-color: rgba(0, 0, 0, 0.75); - display: flex; - justify-content: center; - align-items: center; - overflow: auto; - padding: 24px; + justify-content: center; + align-items: center; + overflow: auto; + padding: 24px; } .settings-modal-close { @@ -2210,447 +1389,10 @@ nav ul li a img { color: var(--color-text-secondary); } -.scheduler-header { - display: flex; - justify-content: space-between; - align-items: center; - margin-bottom: 20px; -} - -.scheduler-header h2 { - margin: 0; - font-size: 1.2rem; - font-weight: 500; -} - -.scheduler-filters { - display: flex; - gap: 20px; - margin-bottom: 20px; - flex-wrap: wrap; -} - -.scheduler-filter-group { - display: flex; - align-items: center; - gap: 8px; -} - -.scheduler-filter-label { - font-weight: 500; - color: var(--color-text-secondary); -} - -.scheduler-filter-select { - padding: 6px 8px; - border-radius: 4px; - border: 1px solid var(--color-border); - background-color: var(--color-bg-secondary); - color: var(--color-text); -} - -.scheduler-task-list { - width: 100%; - border-collapse: collapse; - margin: 1rem 0; - font-size: 0.9rem; - overflow-x: auto; - table-layout: fixed; -} - -.scheduler-task-list th, -.scheduler-task-list td { - padding: 0.75rem; - text-align: left; - border-bottom: 1px solid var(--color-border); - vertical-align: middle; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.scheduler-task-list th:nth-child(1), /* Name */ -.scheduler-task-list td:nth-child(1) { - width: 25%; -} - -.scheduler-task-list th:nth-child(2), /* State */ -.scheduler-task-list td:nth-child(2) { - width: 10%; -} - -.scheduler-task-list th:nth-child(3), /* Type */ -.scheduler-task-list td:nth-child(3) { - width: 10%; -} - -.scheduler-task-list th:nth-child(4), /* Schedule */ -.scheduler-task-list td:nth-child(4) { - width: 20%; -} - -.scheduler-task-list th:nth-child(5), /* Last Run */ -.scheduler-task-list td:nth-child(5) { - width: 20%; -} - -.scheduler-task-list th:nth-child(6), /* Actions */ -.scheduler-task-list td:nth-child(6) { - width: 15%; - text-align: right; -} - -.scheduler-task-list th { - background-color: var(--color-bg-secondary); - font-weight: 500; - cursor: pointer; - user-select: none; -} - -.scheduler-task-list th:hover { - background-color: var(--color-bg-tertiary); -} - -.scheduler-task-actions { - display: flex; - gap: 8px; -} - -.scheduler-task-action { - padding: 4px; - background: none; - border: none; - color: var(--color-text-secondary); - cursor: pointer; - border-radius: 4px; -} - -.scheduler-task-action:hover { - 
background-color: var(--color-bg-tertiary); - color: var(--color-text); -} - -.scheduler-status-badge { - display: inline-block; - padding: 4px 8px; - border-radius: 4px; - font-size: 12px; - font-weight: 500; - text-transform: capitalize; - white-space: nowrap; -} - -.scheduler-status-idle { - background-color: rgba(0, 100, 0, 0.2); - color: #2a9d8f; /* Dark green that works with both light and dark themes */ - border: 1px solid rgba(42, 157, 143, 0.3); -} - -.scheduler-status-running { - background-color: rgba(0, 60, 120, 0.2); - color: #4361ee; /* Dark blue that works with both light and dark themes */ - border: 1px solid rgba(67, 97, 238, 0.3); -} - -.scheduler-status-disabled { - background-color: rgba(70, 70, 70, 0.2); - color: #6c757d; /* Dark grey that works with both light and dark themes */ - border: 1px solid rgba(108, 117, 125, 0.3); -} - -.scheduler-status-error { - background-color: rgba(120, 0, 0, 0.2); - color: #e63946; /* Dark red that works with both light and dark themes */ - border: 1px solid rgba(230, 57, 70, 0.3); -} - -/* Light mode adjustments */ -.light-mode .scheduler-status-idle { - background-color: rgba(42, 157, 143, 0.1); - color: #1a6f65; /* Darker green for light mode */ -} - -.light-mode .scheduler-status-running { - background-color: rgba(67, 97, 238, 0.1); - color: #2540b3; /* Darker blue for light mode */ -} - -.light-mode .scheduler-status-disabled { - background-color: rgba(108, 117, 125, 0.1); - color: #495057; /* Darker grey for light mode */ -} - -.light-mode .scheduler-status-error { - background-color: rgba(230, 57, 70, 0.1); - color: #c5283d; /* Darker red for light mode */ -} - -.scheduler-empty { - text-align: center; - padding: 40px 0; - color: var(--color-text-secondary); -} - -.scheduler-empty-icon { - font-size: 32px; - margin-bottom: 10px; -} - -.scheduler-empty-text { - margin-bottom: 20px; -} - -.scheduler-loading { - text-align: center; - padding: 40px 0; - color: var(--color-text-secondary); -} - -.scheduler-task-details { - padding: 16px; - background-color: var(--color-bg-secondary); - border-radius: 4px; -} - -.scheduler-details-grid { - display: grid; - grid-template-columns: 120px 1fr; - gap: 8px 16px; - margin-bottom: 16px; -} - -.scheduler-details-label { - font-weight: 500; - color: var(--color-text-secondary); - display: flex; - align-items: center; -} - -.scheduler-details-value { - color: var(--color-text); - word-break: break-word; -} - -.scheduler-details-actions { - display: flex; - justify-content: flex-end; -} - -.scheduler-form { - background-color: var(--color-bg-secondary); - border-radius: 4px; - padding: 20px; - margin-bottom: 20px; -} - -.scheduler-form-title { - font-size: 1.2rem; - font-weight: 500; - margin-bottom: 20px; - padding-bottom: 10px; - border-bottom: 1px solid var(--color-border); -} - -.scheduler-form-grid { - display: grid; - grid-template-columns: 1fr 1fr; - gap: 16px; - margin-bottom: 20px; -} - -.scheduler-form-field { - display: flex; - flex-direction: column; - gap: 6px; -} - -.full-width { - grid-column: 1 / -1; -} - -.scheduler-form-label { - font-weight: 500; -} - -.scheduler-form-help { - font-size: 12px; - color: var(--color-text-secondary); -} - -.scheduler-form-actions { - display: flex; - justify-content: flex-end; - gap: 12px; -} - -.scheduler-schedule-builder { - display: grid; - grid-template-columns: repeat(5, 1fr); - gap: 12px; -} - -.scheduler-schedule-field { - display: flex; - flex-direction: column; - gap: 4px; - max-width: 70px; /* Limit width of schedule fields */ -} - 
-.scheduler-schedule-field input { - width: 100%; - min-width: 0; /* Allow shrinking below content size */ - font-size: 0.9rem; /* Slightly reduce font size for better fit */ -} - -.scheduler-schedule-label { - font-size: 12px; - color: var(--color-text-secondary); -} - -.input-group { - display: flex; - gap: 8px; -} - -/* Sort indicators */ -.scheduler-sort-indicator { - display: inline-block; - margin-left: 4px; - transition: transform 0.2s ease; -} - -.scheduler-sort-desc { - transform: rotate(180deg); -} - -/* Responsive adjustments */ -@media (max-width: 768px) { - .scheduler-form-grid { - grid-template-columns: 1fr; - } - - .scheduler-schedule-builder { - grid-template-columns: 1fr 1fr; - } - - .scheduler-filters { - flex-direction: column; - gap: 12px; - } - - .scheduler-task-actions { - flex-wrap: wrap; - } -} - -@media (max-width: 480px) { - nav ul li a { - flex-direction: row; - justify-content: flex-start; - gap: 1rem; - padding: 0.75rem 1rem; - } - - nav ul li a img { - margin-bottom: 0; - width: 30px; - height: 30px; - } -} - -/* Add row hover effect for task list rows matching left panel hover */ -.scheduler-task-list tbody tr { - cursor: pointer; - transition: background-color 0.2s ease; -} - -.scheduler-task-list tbody tr:hover { - background-color: rgba(255, 255, 255, 0.03); -} - -.light-mode .scheduler-task-list tbody tr:hover { - background-color: rgba(0, 0, 0, 0.02); -} - -.scheduler-task-list th { - background-color: var(--color-bg-secondary); - font-weight: 500; - cursor: pointer; - user-select: none; -} - -.scheduler-task-list th:hover { - background-color: var(--color-bg-tertiary); -} - -/* Task detail view styling */ -.scheduler-detail-view { - background-color: var(--color-bg-secondary); - border-radius: 4px; - padding: 20px; - margin-bottom: 20px; - animation: fadeIn 0.3s ease; -} - -.scheduler-detail-header { - display: flex; - justify-content: flex-start; - align-items: center; - margin-bottom: 20px; - padding-bottom: 10px; - border-bottom: 1px solid var(--color-border); - flex-wrap: wrap; - gap: 10px; -} - -.scheduler-detail-header .scheduler-detail-title { - font-size: 1.4rem; - font-weight: 500; - margin: 0; - margin-right: auto; -} - -.scheduler-detail-header .scheduler-status-badge { - margin-right: 10px; -} - -.scheduler-detail-content { - margin-bottom: 20px; -} - -/* Task Scheduler Styles */ - -.scheduler-no-schedule { - color: var(--color-text-secondary); - opacity: 0.7; - font-style: italic; -} - -.task-container-vertical { - display: flex; - flex-direction: column; - width: 100%; - gap: 6px; -} - -/* Smaller status badge for task list */ -.scheduler-status-badge-small { - font-size: 10px; - padding: 2px 6px; - margin-right: 5px; - min-width: 40px; - text-align: center; -} - - - .icon { vertical-align: middle; } - - - /* Animations */ @keyframes fadeIn { from { @@ -2677,12 +1419,6 @@ nav ul li a img { } } -@keyframes shine { - to { - background-position: -200% center; - } -} - @keyframes heartbeat { 0% { transform: scale(1); @@ -2713,3 +1449,155 @@ nav ul li a img { .spin { animation: spin 1s linear infinite; } + +/* Bootstrap Collapse */ +.collapse:not(.show) { + display: none !important; +} +.collapsing { + height: 0; + overflow: hidden; + transition: height .35s ease; +} + +/* Arrow rotation based on Bootstrap's .collapsed class */ +[data-bs-toggle="collapse"] .arrow-icon { + transform: rotate(90deg); + transition: transform .15s ease; +} +[data-bs-toggle="collapse"].collapsed .arrow-icon { + transform: rotate(0deg); +} + +.project-color-ball { + 
width: 0.6em; + height: 0.6em; + border-radius: 50%; + display: inline-block; + box-sizing: border-box; + flex-shrink: 0; +} + +/* ======================================== + Global Dropdown Component Styles + ======================================== */ + +/* Dropdown container - position relative for absolute menu */ +.dropdown { + position: relative; + display: inline-block; +} + +/* Dropdown trigger button */ +.dropdown-trigger { + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + background: transparent; + border: none; + color: var(--color-text); + padding: var(--spacing-xs); + transition: all var(--transition-speed) ease; +} + +.dropdown-trigger:hover { + opacity: 1; +} + +.dropdown-trigger:active { + opacity: 0.5; +} + +/* Dropdown menu */ +.dropdown-menu { + position: absolute; + top: 100%; + right: 0; + min-width: 180px; + max-height: 70vh; + overflow-y: auto; + background-color: var(--color-panel); + border: 1px solid var(--color-border); + border-radius: 0.5rem; + box-shadow: 0 4px 16px rgba(0, 0, 0, 0.25); + z-index: 1010; + padding: var(--spacing-xs) 0; + margin-top: var(--spacing-xs); +} + +.dropdown-menu.left { + left: 0; + right: auto; +} + +/* Dropdown menu items */ +.dropdown-item { + display: flex; + align-items: center; + gap: var(--spacing-sm); + width: 100%; + padding: var(--spacing-sm) var(--spacing-md); + background: transparent; + border: none; + color: var(--color-text); + font-family: var(--font-family-main); + font-size: var(--font-size-small); + text-align: left; + opacity: 0.8; + cursor: pointer; + transition: background-color 0.15s ease; + white-space: nowrap; +} + +.dropdown-item:hover { + background-color: var(--color-background-hover); + opacity: 1; +} + +.dropdown-item:active { + opacity: 0.7; +} + +.dropdown-item:disabled { + opacity: 0.4; + cursor: not-allowed; + pointer-events: none; +} + +.dropdown-item svg { + width: 18px; + height: 18px; + flex-shrink: 0; + opacity: 0.8; +} + +.dropdown-item .material-symbols-outlined { + font-size: 18px; + flex-shrink: 0; + opacity: 0.8; +} + +/* Dropdown separator */ +.dropdown-separator { + height: 1px; + background-color: var(--color-border); + margin: var(--spacing-xs) 0; +} + +/* Dropdown header/label */ +.dropdown-header { + padding: var(--spacing-xs) var(--spacing-md); + font-size: 0.7rem; + color: var(--color-primary); + text-transform: uppercase; + letter-spacing: 0.05em; + opacity: 0.8; +} + +/* Light mode adjustments */ +.light-mode .dropdown-menu { + box-shadow: 0 4px 16px rgba(0, 0, 0, 0.1); +} + +.light-mode .dropdown-item:hover { background-color: var(--color-background-hover); } diff --git a/webui/index.html b/webui/index.html index 1eaea2471e..f820a14336 100644 --- a/webui/index.html +++ b/webui/index.html @@ -9,15 +9,16 @@ + - - - + + + @@ -29,81 +30,14 @@ } - - - - - - - + + + - @@ -125,247 +59,35 @@ - - - - + - +
[webui/index.html hunk bodies omitted — only stray text survived extraction, so the removed/added markup is not reproducible here. The deleted lines covered the old inline UI: the sidebar with the "a0" logo, the Chats and Tasks list sections ("No chats to list." placeholder), the Preferences switches (Autoscroll, Dark mode, Speech, Show thoughts, Show JSON, Show utility messages), the "Version {{version_no}} {{version_time}}" footer, the pause ("|>") and "Stop Speech" controls, the chat input with its "Add attachments to the message" button, and the modal containers. The largest hunk, @@ -381,1372 +103,15 @@, collapses 1,372 lines of this inline markup into 15 new lines.]
- @@ -1765,4 +130,4 @@ - \ No newline at end of file + diff --git a/webui/index.js b/webui/index.js index 9d40323b68..76ceb1316c 100644 --- a/webui/index.js +++ b/webui/index.js @@ -5,99 +5,43 @@ import { sleep } from "/js/sleep.js"; import { store as attachmentsStore } from "/components/chat/attachments/attachmentsStore.js"; import { store as speechStore } from "/components/chat/speech/speech-store.js"; import { store as notificationStore } from "/components/notifications/notification-store.js"; +import { store as preferencesStore } from "/components/sidebar/bottom/preferences/preferences-store.js"; +import { store as inputStore } from "/components/chat/input/input-store.js"; +import { store as chatsStore } from "/components/sidebar/chats/chats-store.js"; +import { store as tasksStore } from "/components/sidebar/tasks/tasks-store.js"; +import { store as chatTopStore } from "/components/chat/top-section/chat-top-store.js"; +import { store as _tooltipsStore } from "/components/tooltips/tooltip-store.js"; globalThis.fetchApi = api.fetchApi; // TODO - backward compatibility for non-modular scripts, remove once refactored to alpine -const leftPanel = document.getElementById("left-panel"); -const rightPanel = document.getElementById("right-panel"); -const container = document.querySelector(".container"); -const chatInput = document.getElementById("chat-input"); -const chatHistory = document.getElementById("chat-history"); -const sendButton = document.getElementById("send-button"); -const inputSection = document.getElementById("input-section"); -const statusSection = document.getElementById("status-section"); -const chatsSection = document.getElementById("chats-section"); -const tasksSection = document.getElementById("tasks-section"); -const progressBar = document.getElementById("progress-bar"); -const autoScrollSwitch = document.getElementById("auto-scroll-switch"); -const timeDate = document.getElementById("time-date-container"); +// Declare variables for DOM elements, they will be assigned on DOMContentLoaded +let leftPanel, + rightPanel, + container, + chatInput, + chatHistory, + sendButton, + inputSection, + statusSection, + progressBar, + autoScrollSwitch, + timeDate; let autoScroll = true; -let context = ""; -let resetCounter = 0; +let context = null; +globalThis.resetCounter = 0; // Used by stores and getChatBasedId let skipOneSpeech = false; -let connectionStatus = undefined; // undefined = not checked yet, true = connected, false = disconnected -// Initialize the toggle button -setupSidebarToggle(); -// Initialize tabs -setupTabs(); - -export function getAutoScroll() { - return autoScroll; -} - -function isMobile() { - return window.innerWidth <= 768; -} - -function toggleSidebar(show) { - const overlay = document.getElementById("sidebar-overlay"); - if (typeof show === "boolean") { - leftPanel.classList.toggle("hidden", !show); - rightPanel.classList.toggle("expanded", !show); - overlay.classList.toggle("visible", show); - } else { - leftPanel.classList.toggle("hidden"); - rightPanel.classList.toggle("expanded"); - overlay.classList.toggle( - "visible", - !leftPanel.classList.contains("hidden") - ); - } -} - -function handleResize() { - const overlay = document.getElementById("sidebar-overlay"); - if (isMobile()) { - leftPanel.classList.add("hidden"); - rightPanel.classList.add("expanded"); - overlay.classList.remove("visible"); - } else { - leftPanel.classList.remove("hidden"); - rightPanel.classList.remove("expanded"); - overlay.classList.remove("visible"); - } -} - 
-globalThis.addEventListener("load", handleResize); -globalThis.addEventListener("resize", handleResize); - -document.addEventListener("DOMContentLoaded", () => { - const overlay = document.getElementById("sidebar-overlay"); - overlay.addEventListener("click", () => { - if (isMobile()) { - toggleSidebar(false); - } - }); -}); - -function setupSidebarToggle() { - const leftPanel = document.getElementById("left-panel"); - const rightPanel = document.getElementById("right-panel"); - const toggleSidebarButton = document.getElementById("toggle-sidebar"); - if (toggleSidebarButton) { - toggleSidebarButton.addEventListener("click", toggleSidebar); - } else { - console.error("Toggle sidebar button not found"); - setTimeout(setupSidebarToggle, 100); - } -} -document.addEventListener("DOMContentLoaded", setupSidebarToggle); +// Sidebar toggle logic is now handled by sidebar-store.js export async function sendMessage() { + const chatInputEl = document.getElementById("chat-input"); + if (!chatInputEl) { + console.warn("chatInput not available, cannot send message"); + return; + } try { - const message = chatInput.value.trim(); + const message = chatInputEl.value.trim(); const attachmentsWithUrls = attachmentsStore.getAttachmentsForSending(); const hasAttachments = attachmentsWithUrls.length > 0; @@ -106,7 +50,7 @@ export async function sendMessage() { const messageId = generateGUID(); // Clear input and attachments - chatInput.value = ""; + chatInputEl.value = ""; attachmentsStore.clearAttachments(); adjustTextareaHeight(); @@ -166,8 +110,9 @@ export async function sendMessage() { toastFetchError("Error sending message", e); // Will use new notification system } } +globalThis.sendMessage = sendMessage; -function toastFetchError(text, error) { +export function toastFetchError(text, error) { console.error(text, error); // Use new frontend error notification system (async, but we don't need to wait) const errorMessage = error?.message || error?.toString() || "Unknown error"; @@ -187,31 +132,36 @@ function toastFetchError(text, error) { } globalThis.toastFetchError = toastFetchError; -chatInput.addEventListener("keydown", (e) => { - if (e.key === "Enter" && !e.shiftKey && !e.isComposing && e.keyCode !== 229) { - e.preventDefault(); - sendMessage(); - } -}); - -sendButton.addEventListener("click", sendMessage); +// Event listeners will be set up in DOMContentLoaded export function updateChatInput(text) { + const chatInputEl = document.getElementById("chat-input"); + if (!chatInputEl) { + console.warn("`chatInput` element not found, cannot update."); + return; + } console.log("updateChatInput called with:", text); // Append text with proper spacing - const currentValue = chatInput.value; + const currentValue = chatInputEl.value; const needsSpace = currentValue.length > 0 && !currentValue.endsWith(" "); - chatInput.value = currentValue + (needsSpace ? " " : "") + text + " "; + chatInputEl.value = currentValue + (needsSpace ? 
" " : "") + text + " "; // Adjust height and trigger input event adjustTextareaHeight(); - chatInput.dispatchEvent(new Event("input")); + chatInputEl.dispatchEvent(new Event("input")); - console.log("Updated chat input value:", chatInput.value); + console.log("Updated chat input value:", chatInputEl.value); } -function updateUserTime() { +async function updateUserTime() { + let userTimeElement = document.getElementById("time-date"); + + while (!userTimeElement) { + await sleep(100); + userTimeElement = document.getElementById("time-date"); + } + const now = new Date(); const hours = now.getHours(); const minutes = now.getMinutes(); @@ -229,59 +179,31 @@ function updateUserTime() { const dateString = now.toLocaleDateString(undefined, options); // Update the HTML - const userTimeElement = document.getElementById("time-date"); userTimeElement.innerHTML = `${timeString}
${dateString}`; } updateUserTime(); setInterval(updateUserTime, 1000); -function setMessage(id, type, heading, content, temp, kvps = null) { - const result = msgs.setMessage(id, type, heading, content, temp, kvps); - if (autoScroll) chatHistory.scrollTop = chatHistory.scrollHeight; +function setMessage(id, type, heading, content, temp, kvps = null, timestamp = null, durationMs = null, /* tokensIn = 0, tokensOut = 0, */ agentNumber = 0) { + const result = msgs.setMessage(id, type, heading, content, temp, kvps, timestamp, durationMs, /* tokensIn, tokensOut, */ agentNumber); + const chatHistoryEl = document.getElementById("chat-history"); + if (preferencesStore.autoScroll && chatHistoryEl) { + chatHistoryEl.scrollTop = chatHistoryEl.scrollHeight; + } return result; } globalThis.loadKnowledge = async function () { - const input = document.createElement("input"); - input.type = "file"; - input.accept = ".txt,.pdf,.csv,.html,.json,.md"; - input.multiple = true; - - input.onchange = async () => { - try { - const formData = new FormData(); - for (let file of input.files) { - formData.append("files[]", file); - } - - formData.append("ctxid", getContext()); - - const response = await api.fetchApi("/import_knowledge", { - method: "POST", - body: formData, - }); - - if (!response.ok) { - toast(await response.text(), "error"); - } else { - const data = await response.json(); - toast( - "Knowledge files imported: " + data.filenames.join(", "), - "success" - ); - } - } catch (e) { - toastFetchError("Error loading knowledge", e); - } - }; - - input.click(); + await inputStore.loadKnowledge(); }; function adjustTextareaHeight() { - chatInput.style.height = "auto"; - chatInput.style.height = chatInput.scrollHeight + "px"; + const chatInputEl = document.getElementById("chat-input"); + if (chatInputEl) { + chatInputEl.style.height = "auto"; + chatInputEl.style.height = chatInputEl.scrollHeight + "px"; + } } export const sendJsonData = async function (url, data) { @@ -311,28 +233,29 @@ function generateGUID() { }); } -function getConnectionStatus() { - return connectionStatus; +export function getConnectionStatus() { + return chatTopStore.connected; } +globalThis.getConnectionStatus = getConnectionStatus; function setConnectionStatus(connected) { - connectionStatus = connected; - if (globalThis.Alpine && timeDate) { - const statusIconEl = timeDate.querySelector(".status-icon"); - if (statusIconEl) { - const statusIcon = Alpine.$data(statusIconEl); - if (statusIcon) { - statusIcon.connected = connected; - } - } - } + chatTopStore.connected = connected; + // connectionStatus = connected; + // // Broadcast connection status without touching Alpine directly + // try { + // window.dispatchEvent( + // new CustomEvent("connection-status", { detail: { connected } }) + // ); + // } catch (_e) { + // // no-op + // } } let lastLogVersion = 0; let lastLogGuid = ""; let lastSpokenNo = 0; -async function poll() { +export async function poll() { let updated = false; try { // Get timezone from navigator @@ -352,12 +275,25 @@ async function poll() { return false; } - if (!context) setContext(response.context); - if (response.context != context) return; //skip late polls after context change + // deselect chat if it is requested by the backend + if (response.deselect_chat) { + chatsStore.deselectChat(); + return + } + + if ( + response.context != context && + !(response.context === null && context === null) && + context !== null + ) { + return; + } // if the chat has been reset, restart this poll as it may have been called with 
incorrect log_from if (lastLogGuid != response.log_guid) { - chatHistory.innerHTML = ""; + const chatHistoryEl = document.getElementById("chat-history"); + if (chatHistoryEl) chatHistoryEl.innerHTML = ""; + msgs.resetProcessGroups(); // Reset process groups on chat reset lastLogVersion = 0; lastLogGuid = response.log_guid; await poll(); @@ -374,7 +310,12 @@ async function poll() { log.heading, log.content, log.temp, - log.kvps + log.kvps, + log.timestamp, + log.duration_ms, + // log.tokens_in, + // log.tokens_out, + log.agent_number || 0 // Agent number for identifying main/subordinate agents ); } afterMessagesUpdate(response.logs); @@ -389,118 +330,58 @@ async function poll() { notificationStore.updateFromPoll(response); //set ui model vars from backend - if (globalThis.Alpine && inputSection) { - const inputAD = Alpine.$data(inputSection); - if (inputAD) { - inputAD.paused = response.paused; - } - } + inputStore.paused = response.paused; // Update status icon state setConnectionStatus(true); - // Update chats list and sort by created_at time (newer first) - let chatsAD = null; + // Update chats list using store let contexts = response.contexts || []; - if (globalThis.Alpine && chatsSection) { - chatsAD = Alpine.$data(chatsSection); - if (chatsAD) { - chatsAD.contexts = contexts.sort( - (a, b) => (b.created_at || 0) - (a.created_at || 0) - ); - } - } + chatsStore.applyContexts(contexts); - // Update tasks list and sort by creation time (newer first) - const tasksSection = document.getElementById("tasks-section"); - if (globalThis.Alpine && tasksSection) { - const tasksAD = Alpine.$data(tasksSection); - if (tasksAD) { - let tasks = response.tasks || []; - - // Always update tasks to ensure state changes are reflected - if (tasks.length > 0) { - // Sort the tasks by creation time - const sortedTasks = [...tasks].sort( - (a, b) => (b.created_at || 0) - (a.created_at || 0) - ); - - // Assign the sorted tasks to the Alpine data - tasksAD.tasks = sortedTasks; - } else { - // Make sure to use a new empty array instance - tasksAD.tasks = []; - } - } - } + // Update tasks list using store + let tasks = response.tasks || []; + tasksStore.applyTasks(tasks); // Make sure the active context is properly selected in both lists if (context) { - // Update selection in the active tab - const activeTab = localStorage.getItem("activeTab") || "chats"; - - if (activeTab === "chats" && chatsAD) { - chatsAD.selected = context; - localStorage.setItem("lastSelectedChat", context); + // Update selection in both stores + chatsStore.setSelected(context); - // Check if this context exists in the chats list - const contextExists = contexts.some((ctx) => ctx.id === context); + const contextInChats = chatsStore.contains(context); + const contextInTasks = tasksStore.contains(context); - // If it doesn't exist in the chats list but we're in chats tab, try to select the first chat - if (!contextExists && contexts.length > 0) { - // Check if the current context is empty before creating a new one - // If there's already a current context and we're just updating UI, don't automatically - // create a new context by calling setContext - const firstChatId = contexts[0].id; + if (contextInTasks) { + tasksStore.setSelected(context); + } - // Only create a new context if we're not currently in an existing context - // This helps prevent duplicate contexts when switching tabs - setContext(firstChatId); - chatsAD.selected = firstChatId; - localStorage.setItem("lastSelectedChat", firstChatId); - } - } else if (activeTab === "tasks" && 
tasksSection) { - const tasksAD = Alpine.$data(tasksSection); - tasksAD.selected = context; - localStorage.setItem("lastSelectedTask", context); - - // Check if this context exists in the tasks list - const taskExists = response.tasks?.some((task) => task.id === context); - - // If it doesn't exist in the tasks list but we're in tasks tab, try to select the first task - if (!taskExists && response.tasks?.length > 0) { - const firstTaskId = response.tasks[0].id; - setContext(firstTaskId); - tasksAD.selected = firstTaskId; - localStorage.setItem("lastSelectedTask", firstTaskId); + if (!contextInChats && !contextInTasks) { + if (chatsStore.contexts.length > 0) { + // If it doesn't exist in the list but other contexts do, fall back to the first + const firstChatId = chatsStore.firstId(); + if (firstChatId) { + setContext(firstChatId); + chatsStore.setSelected(firstChatId); + } + } else if (typeof deselectChat === "function") { + // No contexts remain – clear state so the welcome screen can surface + deselectChat(); } } - } else if ( - response.tasks && - response.tasks.length > 0 && - localStorage.getItem("activeTab") === "tasks" - ) { - // If we're in tasks tab with no selection but have tasks, select the first one - const firstTaskId = response.tasks[0].id; - setContext(firstTaskId); - if (tasksSection) { - const tasksAD = Alpine.$data(tasksSection); - tasksAD.selected = firstTaskId; - localStorage.setItem("lastSelectedTask", firstTaskId); - } - } else if ( - contexts.length > 0 && - localStorage.getItem("activeTab") === "chats" && - chatsAD - ) { - // If we're in chats tab with no selection but have chats, select the first one - const firstChatId = contexts[0].id; - - // Only set context if we don't already have one to avoid duplicates - if (!context) { - setContext(firstChatId); - chatsAD.selected = firstChatId; - localStorage.setItem("lastSelectedChat", firstChatId); + } else { + const welcomeStore = + globalThis.Alpine && typeof globalThis.Alpine.store === "function" + ? 
globalThis.Alpine.store("welcomeStore") + : null; + const welcomeVisible = Boolean(welcomeStore && welcomeStore.isVisible); + + // No context selected, try to select the first available item unless welcome screen is active + if (!welcomeVisible && contexts.length > 0) { + const firstChatId = chatsStore.firstId(); + if (firstChatId) { + setContext(firstChatId); + chatsStore.setSelected(firstChatId); + } } } @@ -513,6 +394,7 @@ async function poll() { return updated; } +globalThis.poll = poll; function afterMessagesUpdate(logs) { if (localStorage.getItem("speech") == "true") { @@ -558,187 +440,31 @@ function speakMessages(logs) { } function updateProgress(progress, active) { + const progressBarEl = document.getElementById("progress-bar"); + if (!progressBarEl) return; if (!progress) progress = ""; if (!active) { - removeClassFromElement(progressBar, "shiny-text"); + removeClassFromElement(progressBarEl, "shiny-text"); } else { - addClassToElement(progressBar, "shiny-text"); + addClassToElement(progressBarEl, "shiny-text"); } progress = msgs.convertIcons(progress); - if (progressBar.innerHTML != progress) { - progressBar.innerHTML = progress; + if (progressBarEl.innerHTML != progress) { + progressBarEl.innerHTML = progress; } } globalThis.pauseAgent = async function (paused) { - try { - const resp = await sendJsonData("/pause", { paused: paused, context }); - } catch (e) { - globalThis.toastFetchError("Error pausing agent", e); - } -}; - -globalThis.resetChat = async function (ctxid = null) { - try { - const resp = await sendJsonData("/chat_reset", { - context: ctxid === null ? context : ctxid, - }); - resetCounter++; - if (ctxid === null) updateAfterScroll(); - } catch (e) { - globalThis.toastFetchError("Error resetting chat", e); - } -}; - -globalThis.newChat = async function () { - try { - newContext(); - updateAfterScroll(); - } catch (e) { - globalThis.toastFetchError("Error creating new chat", e); - } -}; - -globalThis.killChat = async function (id) { - if (!id) { - console.error("No chat ID provided for deletion"); - return; - } - - console.log("Deleting chat with ID:", id); - - try { - const chatsAD = Alpine.$data(chatsSection); - console.log( - "Current contexts before deletion:", - JSON.stringify(chatsAD.contexts.map((c) => ({ id: c.id, name: c.name }))) - ); - - // switch to another context if deleting current - switchFromContext(id); - - // Delete the chat on the server - await sendJsonData("/chat_remove", { context: id }); - - // Update the UI manually to ensure the correct chat is removed - // Deep clone the contexts array to prevent reference issues - const updatedContexts = chatsAD.contexts.filter((ctx) => ctx.id !== id); - console.log( - "Updated contexts after deletion:", - JSON.stringify(updatedContexts.map((c) => ({ id: c.id, name: c.name }))) - ); - - // Force UI update by creating a new array - chatsAD.contexts = [...updatedContexts]; - - updateAfterScroll(); - - justToast("Chat deleted successfully", "success", 1000, "chat-removal"); - } catch (e) { - console.error("Error deleting chat:", e); - globalThis.toastFetchError("Error deleting chat", e); - } -}; - -export function switchFromContext(id) { - // If we're deleting the currently selected chat, switch to another one first - if (context === id) { - const chatsAD = Alpine.$data(chatsSection); - - // Find an alternate chat to switch to if we're deleting the current one - let alternateChat = null; - for (let i = 0; i < chatsAD.contexts.length; i++) { - if (chatsAD.contexts[i].id !== id) { - alternateChat = 
chatsAD.contexts[i]; - break; - } - } - - if (alternateChat) { - setContext(alternateChat.id); - } else { - // If no other chats, create a new empty context - newContext(); - } - } -} - -// Function to ensure proper UI state when switching contexts -function ensureProperTabSelection(contextId) { - // Get current active tab - const activeTab = localStorage.getItem("activeTab") || "chats"; - - // First attempt to determine if this is a task or chat based on the task list - const tasksSection = document.getElementById("tasks-section"); - let isTask = false; - - if (tasksSection) { - const tasksAD = Alpine.$data(tasksSection); - if (tasksAD && tasksAD.tasks) { - isTask = tasksAD.tasks.some((task) => task.id === contextId); - } - } - - // If we're selecting a task but are in the chats tab, switch to tasks tab - if (isTask && activeTab === "chats") { - // Store this as the last selected task before switching - localStorage.setItem("lastSelectedTask", contextId); - activateTab("tasks"); - return true; - } - - // If we're selecting a chat but are in the tasks tab, switch to chats tab - if (!isTask && activeTab === "tasks") { - // Store this as the last selected chat before switching - localStorage.setItem("lastSelectedChat", contextId); - activateTab("chats"); - return true; - } - - return false; -} - -globalThis.selectChat = async function (id) { - if (id === context) return; //already selected - - // Check if we need to switch tabs based on the context type - const tabSwitched = ensureProperTabSelection(id); - - // If we didn't switch tabs, proceed with normal selection - if (!tabSwitched) { - // Switch to the new context - this will clear chat history and reset tracking variables - setContext(id); - - // Update both contexts and tasks lists to reflect the selected item - const chatsAD = Alpine.$data(chatsSection); - const tasksSection = document.getElementById("tasks-section"); - if (tasksSection) { - const tasksAD = Alpine.$data(tasksSection); - tasksAD.selected = id; - } - chatsAD.selected = id; - - // Store this selection in the appropriate localStorage key - const activeTab = localStorage.getItem("activeTab") || "chats"; - if (activeTab === "chats") { - localStorage.setItem("lastSelectedChat", id); - } else if (activeTab === "tasks") { - localStorage.setItem("lastSelectedTask", id); - } - - // Trigger an immediate poll to fetch content - poll(); - } - - updateAfterScroll(); + await inputStore.pauseAgent(paused); }; function generateShortId() { - const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; - let result = ''; + const chars = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + let result = ""; for (let i = 0; i < 8; i++) { result += chars.charAt(Math.floor(Math.random() * chars.length)); } @@ -748,7 +474,8 @@ function generateShortId() { export const newContext = function () { context = generateShortId(); setContext(context); -} +}; +globalThis.newContext = newContext; export const setContext = function (id) { if (id == context) return; @@ -762,240 +489,44 @@ export const setContext = function (id) { // Stop speech when switching chats speechStore.stopAudio(); + // Reset process groups for new context + msgs.resetProcessGroups(); + // Clear the chat history immediately to avoid showing stale content - chatHistory.innerHTML = ""; + const chatHistoryEl = document.getElementById("chat-history"); + if (chatHistoryEl) chatHistoryEl.innerHTML = ""; - // Update both selected states - if (globalThis.Alpine) { - if (chatsSection) { - const chatsAD = 
Alpine.$data(chatsSection); - if (chatsAD) chatsAD.selected = id; - } - if (tasksSection) { - const tasksAD = Alpine.$data(tasksSection); - if (tasksAD) tasksAD.selected = id; - } - } + // Update both selected states using stores + chatsStore.setSelected(id); + tasksStore.setSelected(id); //skip one speech if enabled when switching context if (localStorage.getItem("speech") == "true") skipOneSpeech = true; }; -export const getContext = function () { - return context; -}; - -export const getChatBasedId = function (id) { - return context + "-" + resetCounter + "-" + id; -}; - -globalThis.toggleAutoScroll = async function (_autoScroll) { - autoScroll = _autoScroll; -}; - -globalThis.toggleJson = async function (showJson) { - css.toggleCssProperty(".msg-json", "display", showJson ? "block" : "none"); -}; - -globalThis.toggleThoughts = async function (showThoughts) { - css.toggleCssProperty( - ".msg-thoughts", - "display", - showThoughts ? undefined : "none" - ); -}; - -globalThis.toggleUtils = async function (showUtils) { - css.toggleCssProperty( - ".message-util", - "display", - showUtils ? undefined : "none" - ); -}; - -globalThis.toggleDarkMode = function (isDark) { - if (isDark) { - document.body.classList.remove("light-mode"); - document.body.classList.add("dark-mode"); - } else { - document.body.classList.remove("dark-mode"); - document.body.classList.add("light-mode"); - } - console.log("Dark mode:", isDark); - localStorage.setItem("darkMode", isDark); -}; - -globalThis.toggleSpeech = function (isOn) { - console.log("Speech:", isOn); - localStorage.setItem("speech", isOn); - if (!isOn) speechStore.stopAudio(); -}; - -globalThis.nudge = async function () { - try { - const resp = await sendJsonData("/nudge", { ctxid: getContext() }); - } catch (e) { - toastFetchError("Error nudging agent", e); - } -}; +export const deselectChat = function () { + // Clear current context to show welcome screen + setContext(null); -globalThis.restart = async function () { - try { - if (!getConnectionStatus()) { - await toastFrontendError( - "Backend disconnected, cannot restart.", - "Restart Error" - ); - return; - } - // First try to initiate restart - const resp = await sendJsonData("/restart", {}); - } catch (e) { - // Show restarting message with no timeout and restart group - await toastFrontendInfo("Restarting...", "System Restart", 9999, "restart"); - - let retries = 0; - const maxRetries = 240; // Maximum number of retries (60 seconds with 250ms interval) - - while (retries < maxRetries) { - try { - const resp = await sendJsonData("/health", {}); - // Server is back up, show success message that replaces the restarting message - await new Promise((resolve) => setTimeout(resolve, 250)); - await toastFrontendSuccess("Restarted", "System Restart", 5, "restart"); - return; - } catch (e) { - // Server still down, keep waiting - retries++; - await new Promise((resolve) => setTimeout(resolve, 250)); - } - } + // Clear localStorage selections so we don't auto-restore + localStorage.removeItem("lastSelectedChat"); + localStorage.removeItem("lastSelectedTask"); - // If we get here, restart failed or took too long - await toastFrontendError( - "Restart timed out or failed", - "Restart Error", - 8, - "restart" - ); - } + // Clear the chat history + chatHistory.innerHTML = ""; }; +globalThis.deselectChat = deselectChat; -// Modify this part -document.addEventListener("DOMContentLoaded", () => { - const isDarkMode = localStorage.getItem("darkMode") !== "false"; - toggleDarkMode(isDarkMode); -}); - 
-globalThis.loadChats = async function () { - try { - const fileContents = await readJsonFiles(); - const response = await sendJsonData("/chat_load", { chats: fileContents }); - - if (!response) { - toast("No response returned.", "error"); - } - // else if (!response.ok) { - // if (response.message) { - // toast(response.message, "error") - // } else { - // toast("Undefined error.", "error") - // } - // } - else { - setContext(response.ctxids[0]); - toast("Chats loaded.", "success"); - } - } catch (e) { - toastFetchError("Error loading chats", e); - } +export const getContext = function () { + return context; }; +globalThis.getContext = getContext; +globalThis.setContext = setContext; -globalThis.saveChat = async function () { - try { - const response = await sendJsonData("/chat_export", { ctxid: context }); - - if (!response) { - toast("No response returned.", "error"); - } - // else if (!response.ok) { - // if (response.message) { - // toast(response.message, "error") - // } else { - // toast("Undefined error.", "error") - // } - // } - else { - downloadFile(response.ctxid + ".json", response.content); - toast("Chat file downloaded.", "success"); - } - } catch (e) { - toastFetchError("Error saving chat", e); - } +export const getChatBasedId = function (id) { + return context + "-" + globalThis.resetCounter + "-" + id; }; -function downloadFile(filename, content) { - // Create a Blob with the content to save - const blob = new Blob([content], { type: "application/json" }); - - // Create a link element - const link = document.createElement("a"); - - // Create a URL for the Blob - const url = URL.createObjectURL(blob); - link.href = url; - - // Set the file name for download - link.download = filename; - - // Programmatically click the link to trigger the download - link.click(); - - // Clean up by revoking the object URL - setTimeout(() => { - URL.revokeObjectURL(url); - }, 0); -} - -function readJsonFiles() { - return new Promise((resolve, reject) => { - // Create an input element of type 'file' - const input = document.createElement("input"); - input.type = "file"; - input.accept = ".json"; // Only accept JSON files - input.multiple = true; // Allow multiple file selection - - // Trigger the file dialog - input.click(); - - // When files are selected - input.onchange = async () => { - const files = input.files; - if (!files.length) { - resolve([]); // Return an empty array if no files are selected - return; - } - - // Read each file as a string and store in an array - const filePromises = Array.from(files).map((file) => { - return new Promise((fileResolve, fileReject) => { - const reader = new FileReader(); - reader.onload = () => fileResolve(reader.result); - reader.onerror = fileReject; - reader.readAsText(file); - }); - }); - - try { - const fileContents = await Promise.all(filePromises); - resolve(fileContents); - } catch (error) { - reject(error); // In case of any file reading error - } - }; - }); -} - function addClassToElement(element, className) { element.classList.add(className); } @@ -1004,64 +535,51 @@ function removeClassFromElement(element, className) { element.classList.remove(className); } -function justToast(text, type = "info", timeout = 5000, group = "") { - notificationStore.addFrontendToastOnly( - type, - text, - "", - timeout / 1000, - group - ) +export function justToast(text, type = "info", timeout = 5000, group = "") { + notificationStore.addFrontendToastOnly(type, text, "", timeout / 1000, group); } - +globalThis.justToast = justToast; -function toast(text, type 
= "info", timeout = 5000) { +export function toast(text, type = "info", timeout = 5000) { // Convert timeout from milliseconds to seconds for new notification system const display_time = Math.max(timeout / 1000, 1); // Minimum 1 second // Use new frontend notification system based on type - switch (type.toLowerCase()) { - case "error": - return notificationStore.frontendError(text, "Error", display_time); - case "success": - return notificationStore.frontendInfo(text, "Success", display_time); - case "warning": - return notificationStore.frontendWarning(text, "Warning", display_time); - case "info": - default: - return notificationStore.frontendInfo(text, "Info", display_time); - } - + switch (type.toLowerCase()) { + case "error": + return notificationStore.frontendError(text, "Error", display_time); + case "success": + return notificationStore.frontendInfo(text, "Success", display_time); + case "warning": + return notificationStore.frontendWarning(text, "Warning", display_time); + case "info": + default: + return notificationStore.frontendInfo(text, "Info", display_time); + } } globalThis.toast = toast; // OLD: hideToast function removed - now using new notification system function scrollChanged(isAtBottom) { - if (globalThis.Alpine && autoScrollSwitch) { - const inputAS = Alpine.$data(autoScrollSwitch); - if (inputAS) { - inputAS.autoScroll = isAtBottom; - } - } - // autoScrollSwitch.checked = isAtBottom + // Reflect scroll state into preferences store; UI is bound via x-model + preferencesStore.autoScroll = isAtBottom; } -function updateAfterScroll() { +export function updateAfterScroll() { // const toleranceEm = 1; // Tolerance in em units // const tolerancePx = toleranceEm * parseFloat(getComputedStyle(document.documentElement).fontSize); // Convert em to pixels const tolerancePx = 10; const chatHistory = document.getElementById("chat-history"); + if (!chatHistory) return; + const isAtBottom = chatHistory.scrollHeight - chatHistory.scrollTop <= chatHistory.clientHeight + tolerancePx; scrollChanged(isAtBottom); } - -chatHistory.addEventListener("scroll", updateAfterScroll); - -chatInput.addEventListener("input", adjustTextareaHeight); +globalThis.updateAfterScroll = updateAfterScroll; // setInterval(poll, 250); @@ -1090,193 +608,36 @@ async function startPolling() { _doPoll(); } -document.addEventListener("DOMContentLoaded", startPolling); - -// Setup event handlers once the DOM is fully loaded +// All initializations and event listeners are now consolidated here document.addEventListener("DOMContentLoaded", function () { - setupSidebarToggle(); - setupTabs(); - initializeActiveTab(); + // Assign DOM elements to variables now that the DOM is ready + leftPanel = document.getElementById("left-panel"); + rightPanel = document.getElementById("right-panel"); + container = document.querySelector(".container"); + chatInput = document.getElementById("chat-input"); + chatHistory = document.getElementById("chat-history"); + sendButton = document.getElementById("send-button"); + inputSection = document.getElementById("input-section"); + statusSection = document.getElementById("status-section"); + progressBar = document.getElementById("progress-bar"); + autoScrollSwitch = document.getElementById("auto-scroll-switch"); + timeDate = document.getElementById("time-date-container"); + + // Sidebar and input event listeners are now handled by their respective stores + + if (chatHistory) { + chatHistory.addEventListener("scroll", updateAfterScroll); + } + + // Start polling for updates + startPolling(); 
}); -// Setup tabs functionality -function setupTabs() { - const chatsTab = document.getElementById("chats-tab"); - const tasksTab = document.getElementById("tasks-tab"); - - if (chatsTab && tasksTab) { - chatsTab.addEventListener("click", function () { - activateTab("chats"); - }); - - tasksTab.addEventListener("click", function () { - activateTab("tasks"); - }); - } else { - console.error("Tab elements not found"); - setTimeout(setupTabs, 100); // Retry setup - } -} - -function activateTab(tabName) { - const chatsTab = document.getElementById("chats-tab"); - const tasksTab = document.getElementById("tasks-tab"); - const chatsSection = document.getElementById("chats-section"); - const tasksSection = document.getElementById("tasks-section"); - - // Get current context to preserve before switching - const currentContext = context; - - // Store the current selection for the active tab before switching - const previousTab = localStorage.getItem("activeTab"); - if (previousTab === "chats") { - localStorage.setItem("lastSelectedChat", currentContext); - } else if (previousTab === "tasks") { - localStorage.setItem("lastSelectedTask", currentContext); - } - - // Reset all tabs and sections - chatsTab.classList.remove("active"); - tasksTab.classList.remove("active"); - chatsSection.style.display = "none"; - tasksSection.style.display = "none"; - - // Remember the last active tab in localStorage - localStorage.setItem("activeTab", tabName); - - // Activate selected tab and section - if (tabName === "chats") { - chatsTab.classList.add("active"); - chatsSection.style.display = ""; - - // Get the available contexts from Alpine.js data - const chatsAD = globalThis.Alpine ? Alpine.$data(chatsSection) : null; - const availableContexts = chatsAD?.contexts || []; - - // Restore previous chat selection - const lastSelectedChat = localStorage.getItem("lastSelectedChat"); - - // Only switch if: - // 1. lastSelectedChat exists AND - // 2. It's different from current context AND - // 3. The context actually exists in our contexts list OR there are no contexts yet - if ( - lastSelectedChat && - lastSelectedChat !== currentContext && - (availableContexts.some((ctx) => ctx.id === lastSelectedChat) || - availableContexts.length === 0) - ) { - setContext(lastSelectedChat); - } - } else if (tabName === "tasks") { - tasksTab.classList.add("active"); - tasksSection.style.display = "flex"; - tasksSection.style.flexDirection = "column"; - - // Get the available tasks from Alpine.js data - const tasksAD = globalThis.Alpine ? Alpine.$data(tasksSection) : null; - const availableTasks = tasksAD?.tasks || []; - - // Restore previous task selection - const lastSelectedTask = localStorage.getItem("lastSelectedTask"); - - // Only switch if: - // 1. lastSelectedTask exists AND - // 2. It's different from current context AND - // 3. 
The task actually exists in our tasks list - if ( - lastSelectedTask && - lastSelectedTask !== currentContext && - availableTasks.some((task) => task.id === lastSelectedTask) - ) { - setContext(lastSelectedTask); - } - } - - // Request a poll update - poll(); -} - -// Add function to initialize active tab and selections from localStorage -function initializeActiveTab() { - // Initialize selection storage if not present - if (!localStorage.getItem("lastSelectedChat")) { - localStorage.setItem("lastSelectedChat", ""); - } - if (!localStorage.getItem("lastSelectedTask")) { - localStorage.setItem("lastSelectedTask", ""); - } - - const activeTab = localStorage.getItem("activeTab") || "chats"; - activateTab(activeTab); -} - /* * A0 Chat UI * - * Tasks tab functionality: - * - Tasks are displayed in the Tasks tab with the same mechanics as chats + * Unified sidebar layout: + * - Both Chats and Tasks lists are always visible in a vertical layout * - Both lists are sorted by creation time (newest first) - * - Selection state is preserved across tab switches - * - The active tab is remembered across sessions * - Tasks use the same context system as chats for communication with the backend - * - Future support for renaming and deletion will be implemented later */ - -// Open the scheduler detail view for a specific task -function openTaskDetail(taskId) { - // Wait for Alpine.js to be fully loaded - if (globalThis.Alpine) { - // Get the settings modal button and click it to ensure all init logic happens - const settingsButton = document.getElementById("settings"); - if (settingsButton) { - // Programmatically click the settings button - settingsButton.click(); - - // Now get a reference to the modal element - const modalEl = document.getElementById("settingsModal"); - if (!modalEl) { - console.error("Settings modal element not found after clicking button"); - return; - } - - // Get the Alpine.js data for the modal - const modalData = globalThis.Alpine ? Alpine.$data(modalEl) : null; - - // Use a timeout to ensure the modal is fully rendered - setTimeout(() => { - // Switch to the scheduler tab first - modalData.switchTab("scheduler"); - - // Use another timeout to ensure the scheduler component is initialized - setTimeout(() => { - // Get the scheduler component - const schedulerComponent = document.querySelector( - '[x-data="schedulerSettings"]' - ); - if (!schedulerComponent) { - console.error("Scheduler component not found"); - return; - } - - // Get the Alpine.js data for the scheduler component - const schedulerData = globalThis.Alpine - ? Alpine.$data(schedulerComponent) - : null; - - // Show the task detail view for the specific task - schedulerData.showTaskDetail(taskId); - - console.log("Task detail view opened for task:", taskId); - }, 50); // Give time for the scheduler tab to initialize - }, 25); // Give time for the modal to render - } else { - console.error("Settings button not found"); - } - } else { - console.error("Alpine.js not loaded"); - } -} - -// Make the function available globally -globalThis.openTaskDetail = openTaskDetail; diff --git a/webui/js/AlpineStore.js b/webui/js/AlpineStore.js index fea2d457f4..8ad68f9eda 100644 --- a/webui/js/AlpineStore.js +++ b/webui/js/AlpineStore.js @@ -44,4 +44,80 @@ export function createStore(name, initialState) { */ export function getStore(name) { return /** @type {T | undefined} */ (stores.get(name)); +} + +/** + * Save current state of a store into a plain object, with optional include/exclude filters. 
+ * If exclude (blacklist) is provided and non-empty, everything except excluded keys is saved. + * Otherwise, if include (whitelist) is provided and non-empty, only included keys are saved. + * If both are empty, all own enumerable properties are saved. + * @param {object} store + * @param {string[]} [include] + * @param {string[]} [exclude] + * @returns {object} + */ +export function saveState(store, include = [], exclude = []) { + const hasExclude = Array.isArray(exclude) && exclude.length > 0; + const hasInclude = !hasExclude && Array.isArray(include) && include.length > 0; + + /** @type {Record} */ + const snapshot = {}; + + for (const key of Object.keys(store)) { + if (hasExclude) { + if (exclude.includes(key)) continue; + } else if (hasInclude) { + if (!include.includes(key)) continue; + } + + const value = store[key]; + if (typeof value === "function") continue; + + if (Array.isArray(value)) { + snapshot[key] = value.map((item) => + typeof item === "object" && item !== null ? { ...item } : item + ); + } else if (typeof value === "object" && value !== null) { + snapshot[key] = { ...value }; + } else { + snapshot[key] = value; + } + } + + return snapshot; +} + +/** + * Load a previously saved state object back into a store, honoring include/exclude filters. + * Filtering rules are the same as in saveState. + * @param {object} store + * @param {object} state + * @param {string[]} [include] + * @param {string[]} [exclude] + */ +export function loadState(store, state, include = [], exclude = []) { + if (!state) return; + + const hasExclude = Array.isArray(exclude) && exclude.length > 0; + const hasInclude = !hasExclude && Array.isArray(include) && include.length > 0; + + for (const key of Object.keys(state)) { + if (hasExclude) { + if (exclude.includes(key)) continue; + } else if (hasInclude) { + if (!include.includes(key)) continue; + } + + const value = state[key]; + + if (Array.isArray(value)) { + store[key] = value.map((item) => + typeof item === "object" && item !== null ? { ...item } : item + ); + } else if (typeof value === "object" && value !== null) { + store[key] = { ...value }; + } else { + store[key] = value; + } + } } \ No newline at end of file diff --git a/webui/js/api.js b/webui/js/api.js index 2baa1eb9a1..33f8934447 100644 --- a/webui/js/api.js +++ b/webui/js/api.js @@ -52,7 +52,7 @@ export async function fetchApi(url, request) { // retry the request with new token csrfToken = null; return await _wrap(false); - }else if(response.redirected && response.url.endsWith("/login")){ + } else if (response.redirected && response.url.endsWith("/login")) { // redirect to login window.location.href = response.url; return; @@ -88,7 +88,12 @@ async function getCsrfToken() { return; } const json = await response.json(); - csrfToken = json.token; - document.cookie = `csrf_token_${json.runtime_id}=${csrfToken}; SameSite=Strict; Path=/`; - return csrfToken; + if (json.ok) { + csrfToken = json.token; + document.cookie = `csrf_token_${json.runtime_id}=${csrfToken}; SameSite=Strict; Path=/`; + return csrfToken; + } else { + if (json.error) alert(json.error); + throw new Error(json.error || "Failed to get CSRF token"); + } } diff --git a/webui/js/components.js b/webui/js/components.js index 926c78e8d2..16612662f5 100644 --- a/webui/js/components.js +++ b/webui/js/components.js @@ -30,7 +30,8 @@ export async function importComponent(path, targetElement) { targetElement.innerHTML = '
'; // full component url - const componentUrl = "components/" + path; + const trimmedPath = path.replace(/^\/+/, ""); + const componentUrl = trimmedPath.startsWith("components/") ? trimmedPath : "components/" + trimmedPath; // get html from cache or fetch it let html; diff --git a/webui/js/confirmClick.js b/webui/js/confirmClick.js new file mode 100644 index 0000000000..76c0aa3925 --- /dev/null +++ b/webui/js/confirmClick.js @@ -0,0 +1,74 @@ +// Inline button two-click confirmation for destructive actions. +// First click arms, second click confirms, timeout resets. + +const CONFIRM_TIMEOUT = 2000; +const CONFIRM_CLASS = 'confirming'; +const CONFIRM_ICON = 'check'; +const CONFIRM_TEXT = 'Confirm'; + +const buttonStates = new WeakMap(); + +// Handles inline two-click confirmation for a button. +export function confirmClick(event, action) { + const button = event.currentTarget; + if (!button) return; + + const state = buttonStates.get(button); + + if (state?.confirming) { + clearTimeout(state.timeoutId); + resetButton(button, state); + buttonStates.delete(button); + action(); + } else { + const iconEl = button.querySelector('.material-symbols-outlined, .material-icons-outlined'); + const isIconButton = iconEl && button.textContent.trim() === iconEl.textContent.trim(); + + const newState = { + confirming: true, + isIconButton, + originalIcon: iconEl?.textContent?.trim(), + originalHTML: isIconButton ? null : button.innerHTML, + timeoutId: setTimeout(() => { + resetButton(button, newState); + buttonStates.delete(button); + }, CONFIRM_TIMEOUT) + }; + + buttonStates.set(button, newState); + button.classList.add(CONFIRM_CLASS); + + if (isIconButton && iconEl) { + // Icon-only button: just swap icon + iconEl.textContent = CONFIRM_ICON; + } else { + // Text button: show icon + optional "Confirm" text + const originalText = button.textContent.trim(); + const confirmContent = originalText.length >= 4 + ? 
`${CONFIRM_ICON}${CONFIRM_TEXT}` + : `${CONFIRM_ICON}`; + button.innerHTML = confirmContent; + } + } +} + +// Reset button to original state +function resetButton(button, state) { + button.classList.remove(CONFIRM_CLASS); + if (state.isIconButton) { + const iconEl = button.querySelector('.material-symbols-outlined, .material-icons-outlined'); + if (iconEl && state.originalIcon) { + iconEl.textContent = state.originalIcon; + } + } else if (state.originalHTML) { + button.innerHTML = state.originalHTML; + } +} + +// Register Alpine magic helper +export function registerAlpineMagic() { + if (globalThis.Alpine) { + Alpine.magic('confirmClick', () => confirmClick); + } +} + diff --git a/webui/js/file_browser.js b/webui/js/file_browser.js deleted file mode 100644 index 350feeefd8..0000000000 --- a/webui/js/file_browser.js +++ /dev/null @@ -1,268 +0,0 @@ -const fileBrowserModalProxy = { - isOpen: false, - isLoading: false, - - browser: { - title: "File Browser", - currentPath: "", - entries: [], - parentPath: "", - sortBy: "name", - sortDirection: "asc", - }, - - // Initialize navigation history - history: [], - - async openModal(path) { - const modalEl = document.getElementById("fileBrowserModal"); - const modalAD = Alpine.$data(modalEl); - - modalAD.isOpen = true; - modalAD.isLoading = true; - modalAD.history = []; // reset history when opening modal - - // Initialize currentPath to root if it's empty - if (path) modalAD.browser.currentPath = path; - else if (!modalAD.browser.currentPath) - modalAD.browser.currentPath = "$WORK_DIR"; - - await modalAD.fetchFiles(modalAD.browser.currentPath); - }, - - isArchive(filename) { - const archiveExts = ["zip", "tar", "gz", "rar", "7z"]; - const ext = filename.split(".").pop().toLowerCase(); - return archiveExts.includes(ext); - }, - - async fetchFiles(path = "") { - this.isLoading = true; - try { - const response = await fetchApi( - `/get_work_dir_files?path=${encodeURIComponent(path)}` - ); - - if (response.ok) { - const data = await response.json(); - this.browser.entries = data.data.entries; - this.browser.currentPath = data.data.current_path; - this.browser.parentPath = data.data.parent_path; - } else { - console.error("Error fetching files:", await response.text()); - this.browser.entries = []; - } - } catch (error) { - window.toastFrontendError("Error fetching files: " + error.message, "File Browser Error"); - this.browser.entries = []; - } finally { - this.isLoading = false; - } - }, - - async navigateToFolder(path) { - // Push current path to history before navigating - if (this.browser.currentPath !== path) { - this.history.push(this.browser.currentPath); - } - await this.fetchFiles(path); - }, - - async navigateUp() { - if (this.browser.parentPath !== "") { - // Push current path to history before navigating up - this.history.push(this.browser.currentPath); - await this.fetchFiles(this.browser.parentPath); - } - }, - - sortFiles(entries) { - return [...entries].sort((a, b) => { - // Folders always come first - if (a.is_dir !== b.is_dir) { - return a.is_dir ? -1 : 1; - } - - const direction = this.browser.sortDirection === "asc" ? 1 : -1; - switch (this.browser.sortBy) { - case "name": - return direction * a.name.localeCompare(b.name); - case "size": - return direction * (a.size - b.size); - case "date": - return direction * (new Date(a.modified) - new Date(b.modified)); - default: - return 0; - } - }); - }, - - toggleSort(column) { - if (this.browser.sortBy === column) { - this.browser.sortDirection = - this.browser.sortDirection === "asc" ? 
"desc" : "asc"; - } else { - this.browser.sortBy = column; - this.browser.sortDirection = "asc"; - } - }, - - async deleteFile(file) { - if (!confirm(`Are you sure you want to delete ${file.name}?`)) { - return; - } - - try { - const response = await fetchApi("/delete_work_dir_file", { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify({ - path: file.path, - currentPath: this.browser.currentPath, - }), - }); - - if (response.ok) { - const data = await response.json(); - this.browser.entries = this.browser.entries.filter( - (entry) => entry.path !== file.path - ); - alert("File deleted successfully."); - } else { - alert(`Error deleting file: ${await response.text()}`); - } - } catch (error) { - window.toastFrontendError("Error deleting file: " + error.message, "File Delete Error"); - alert("Error deleting file"); - } - }, - - async handleFileUpload(event) { - try { - const files = event.target.files; - if (!files.length) return; - - const formData = new FormData(); - formData.append("path", this.browser.currentPath); - - for (let i = 0; i < files.length; i++) { - const ext = files[i].name.split(".").pop().toLowerCase(); - if (!["zip", "tar", "gz", "rar", "7z"].includes(ext)) { - if (files[i].size > 100 * 1024 * 1024) { - // 100MB - alert( - `File ${files[i].name} exceeds the maximum allowed size of 100MB.` - ); - continue; - } - } - formData.append("files[]", files[i]); - } - - // Proceed with upload after validation - const response = await fetchApi("/upload_work_dir_files", { - method: "POST", - body: formData, - }); - - if (response.ok) { - const data = await response.json(); - // Update the file list with new data - this.browser.entries = data.data.entries.map((entry) => ({ - ...entry, - uploadStatus: data.failed.includes(entry.name) ? 
"failed" : "success", - })); - this.browser.currentPath = data.data.current_path; - this.browser.parentPath = data.data.parent_path; - - // Show success message - if (data.failed && data.failed.length > 0) { - const failedFiles = data.failed - .map((file) => `${file.name}: ${file.error}`) - .join("\n"); - alert(`Some files failed to upload:\n${failedFiles}`); - } - } else { - alert(data.message); - } - } catch (error) { - window.toastFrontendError("Error uploading files: " + error.message, "File Upload Error"); - alert("Error uploading files"); - } - }, - - downloadFile(file) { - const link = document.createElement("a"); - link.href = `/download_work_dir_file?path=${encodeURIComponent(file.path)}`; - link.download = file.name; - document.body.appendChild(link); - link.click(); - document.body.removeChild(link); - }, - - // Helper Functions - formatFileSize(size) { - if (size === 0) return "0 Bytes"; - const k = 1024; - const sizes = ["Bytes", "KB", "MB", "GB", "TB"]; - const i = Math.floor(Math.log(size) / Math.log(k)); - return parseFloat((size / Math.pow(k, i)).toFixed(2)) + " " + sizes[i]; - }, - - formatDate(dateString) { - const options = { - year: "numeric", - month: "short", - day: "numeric", - hour: "2-digit", - minute: "2-digit", - }; - return new Date(dateString).toLocaleDateString(undefined, options); - }, - - handleClose() { - this.isOpen = false; - }, -}; - -// Wait for Alpine to be ready -document.addEventListener("alpine:init", () => { - Alpine.data("fileBrowserModalProxy", () => ({ - init() { - Object.assign(this, fileBrowserModalProxy); - // Ensure immediate file fetch when modal opens - this.$watch("isOpen", async (value) => { - if (value) { - await this.fetchFiles(this.browser.currentPath); - } - }); - }, - })); -}); - -// Keep the global assignment for backward compatibility -window.fileBrowserModalProxy = fileBrowserModalProxy; - -openFileLink = async function (path) { - try { - const resp = await window.sendJsonData("/file_info", { path }); - if (!resp.exists) { - window.toastFrontendError("File does not exist.", "File Error"); - return; - } - - if (resp.is_dir) { - fileBrowserModalProxy.openModal(resp.abs_path); - } else { - fileBrowserModalProxy.downloadFile({ - path: resp.abs_path, - name: resp.file_name, - }); - } - } catch (e) { - window.toastFrontendError("Error opening file: " + e.message, "File Open Error"); - } -}; -window.openFileLink = openFileLink; diff --git a/webui/js/history.js b/webui/js/history.js deleted file mode 100644 index c071c53bf1..0000000000 --- a/webui/js/history.js +++ /dev/null @@ -1,55 +0,0 @@ -import { getContext } from "../index.js"; - -export async function openHistoryModal() { - try { - const hist = await window.sendJsonData("/history_get", { context: getContext() }); - // const data = JSON.stringify(hist.history, null, 4); - const data = hist.history - const size = hist.tokens - await showEditorModal(data, "markdown", `History ~${size} tokens`, "Conversation history visible to the LLM. History is compressed to fit into the context window over time."); - } catch (e) { - window.toastFrontendError("Error fetching history: " + e.message, "Chat History Error"); - return - } -} - -export async function openCtxWindowModal() { - try { - const win = await window.sendJsonData("/ctx_window_get", { context: getContext() }); - const data = win.content - const size = win.tokens - await showEditorModal(data, "markdown", `Context window ~${size} tokens`, "Data passed to the LLM during last interaction. 
Contains system message, conversation history and RAG."); - } catch (e) { - window.toastFrontendError("Error fetching context: " + e.message, "Context Error"); - return - } -} - -async function showEditorModal(data, type = "json", title, description = "") { - // Generate the HTML with JSON Viewer container - const html = `
`; - - // Open the modal with the generated HTML - await window.genericModalProxy.openModal(title, description, html, ["history-viewer"]); - - // Initialize the JSON Viewer after the modal is rendered - const container = document.getElementById("json-viewer-container"); - if (container) { - const editor = ace.edit("json-viewer-container"); - - const dark = localStorage.getItem('darkMode') - if (dark != "false") { - editor.setTheme("ace/theme/github_dark"); - } else { - editor.setTheme("ace/theme/tomorrow"); - } - - editor.session.setMode("ace/mode/" + type); - editor.setValue(data); - editor.clearSelection(); - // editor.session.$toggleFoldWidget(5, {}) - } -} - -window.openHistoryModal = openHistoryModal; -window.openCtxWindowModal = openCtxWindowModal; diff --git a/webui/js/image_modal.js b/webui/js/image_modal.js deleted file mode 100644 index 6c6bc1e493..0000000000 --- a/webui/js/image_modal.js +++ /dev/null @@ -1,87 +0,0 @@ -// Singleton interval ID for image refresh -let activeIntervalId = null; - -export async function openImageModal(src, refreshInterval = 0) { - try { - let imgSrc = src; - - // Clear any existing refresh interval - if (activeIntervalId !== null) { - clearInterval(activeIntervalId); - activeIntervalId = null; - } - - if (refreshInterval > 0) { - // Add or update timestamp to bypass cache - const addTimestamp = (url) => { - const urlObj = new URL(url, window.location.origin); - urlObj.searchParams.set('t', Date.now()); - return urlObj.toString(); - }; - - // Check if image viewer is still active - const isImageViewerActive = () => { - const container = document.querySelector('#image-viewer-container'); - if (!container) return false; - - // Check if element or any parent is hidden - let element = container; - while (element) { - const style = window.getComputedStyle(element); - if (style.display === 'none' || style.visibility === 'hidden' || style.opacity === '0') { - return false; - } - element = element.parentElement; - } - return true; - }; - - // Preload next image before displaying - const preloadAndUpdate = async (currentImg) => { - const nextSrc = addTimestamp(src); - // Create a promise that resolves when the image is loaded - const preloadPromise = new Promise((resolve, reject) => { - const tempImg = new Image(); - tempImg.onload = () => resolve(nextSrc); - tempImg.onerror = reject; - tempImg.src = nextSrc; - }); - - try { - // Wait for preload to complete - const loadedSrc = await preloadPromise; - // Check if this interval is still the active one - if (currentImg && isImageViewerActive()) { - currentImg.src = loadedSrc; - } - } catch (err) { - console.error('Failed to preload image:', err); - } - }; - - imgSrc = addTimestamp(src); - - // Set up periodic refresh with preloading - activeIntervalId = setInterval(() => { - if (!isImageViewerActive()) { - clearInterval(activeIntervalId); - activeIntervalId = null; - return; - } - const img = document.querySelector('.image-viewer-img'); - if (img) { - preloadAndUpdate(img); - } - }, refreshInterval); - } - - const html = `
`; - const fileName = src.split("/").pop(); - - // Open the modal with the generated HTML - await window.genericModalProxy.openModal(fileName, "", html); - } catch (e) { - window.toastFrontendError("Error fetching history: " + e.message, "Image History Error"); - return; - } -} diff --git a/webui/js/initFw.js b/webui/js/initFw.js index 69abde220f..f56ba15d33 100644 --- a/webui/js/initFw.js +++ b/webui/js/initFw.js @@ -1,6 +1,7 @@ import * as initializer from "./initializer.js"; import * as _modals from "./modals.js"; import * as _components from "./components.js"; +import { registerAlpineMagic } from "./confirmClick.js"; // initialize required elements await initializer.initialize(); @@ -8,17 +9,53 @@ await initializer.initialize(); // import alpine library await import("../vendor/alpine/alpine.min.js"); +// register $confirmClick magic helper for inline button confirmations +registerAlpineMagic(); + // add x-destroy directive to alpine Alpine.directive( - "destroy", - (el, { expression }, { evaluateLater, cleanup }) => { - const onDestroy = evaluateLater(expression); - cleanup(() => onDestroy()); - } -); - -// add x-create directive to alpine -Alpine.directive("create", (_el, { expression }, { evaluateLater }) => { - const onCreate = evaluateLater(expression); - onCreate(); -}); + "destroy", + (_el, { expression }, { evaluateLater, cleanup }) => { + const onDestroy = evaluateLater(expression); + cleanup(() => onDestroy()); + } + ); + + // add x-create directive to alpine + Alpine.directive( + "create", + (_el, { expression }, { evaluateLater }) => { + const onCreate = evaluateLater(expression); + onCreate(); + } + ); + + // run every second if the component is active + Alpine.directive( + "every-second", + (_el, { expression }, { evaluateLater, cleanup }) => { + const onTick = evaluateLater(expression); + const intervalId = setInterval(() => onTick(), 1000); + cleanup(() => clearInterval(intervalId)); + } + ); + + // run every minute if the component is active + Alpine.directive( + "every-minute", + (_el, { expression }, { evaluateLater, cleanup }) => { + const onTick = evaluateLater(expression); + const intervalId = setInterval(() => onTick(), 60_000); + cleanup(() => clearInterval(intervalId)); + } + ); + + // run every hour if the component is active + Alpine.directive( + "every-hour", + (_el, { expression }, { evaluateLater, cleanup }) => { + const onTick = evaluateLater(expression); + const intervalId = setInterval(() => onTick(), 3_600_000); + cleanup(() => clearInterval(intervalId)); + } + ); diff --git a/webui/js/messages.js b/webui/js/messages.js index 89c056eea4..5be8ccf10b 100644 --- a/webui/js/messages.js +++ b/webui/js/messages.js @@ -1,25 +1,136 @@ // message actions and components -import { openImageModal } from "./image_modal.js"; +import { store as imageViewerStore } from "../components/modals/image-viewer/image-viewer-store.js"; import { marked } from "../vendor/marked/marked.esm.js"; import { store as _messageResizeStore } from "/components/messages/resize/message-resize-store.js"; // keep here, required in html import { store as attachmentsStore } from "/components/chat/attachments/attachmentsStore.js"; import { addActionButtonsToElement } from "/components/messages/action-buttons/simple-action-buttons.js"; +import { store as processGroupStore } from "/components/messages/process-group/process-group-store.js"; +import { store as preferencesStore } from "/components/sidebar/bottom/preferences/preferences-store.js"; +import { formatDuration } from "./time-utils.js"; 
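The duration metrics introduced further down in this hunk lean on the `formatDuration` helper imported above from `./time-utils.js`, a module that is not part of this diff. Judging only from its call sites (it always receives a millisecond count such as `accumulatedMs` or `totalDurationMs`, and the group header starts out showing `0s`), a minimal sketch of such a helper could look like the following; the actual implementation in the repository may differ.

```js
// Hypothetical sketch of the formatDuration helper assumed to live in ./time-utils.js;
// it converts a millisecond count into a compact label like "42s" or "3m 05s".
export function formatDuration(ms) {
  const totalSeconds = Math.max(0, Math.round(ms / 1000)); // clamp negatives, round to whole seconds
  const minutes = Math.floor(totalSeconds / 60);
  const seconds = totalSeconds % 60;
  return minutes > 0 ? `${minutes}m ${String(seconds).padStart(2, "0")}s` : `${seconds}s`;
}
```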
const chatHistory = document.getElementById("chat-history"); let messageGroup = null; +let currentProcessGroup = null; // Track current process group for collapsible UI +let currentDelegationSteps = {}; // Track delegation steps by agent number for nesting + +/** + * Resolve tool name from kvps, existing attribute, or previous siblings + * For 'tool' type steps, inherits from preceding step if not directly available + */ +function resolveToolName(type, kvps, stepElement) { + // Direct from kvps + if (kvps?.tool_name) return kvps.tool_name; + + // Keep existing if present (for non-tool types during updates) + if (type !== 'tool' && stepElement?.hasAttribute('data-tool-name')) { + return stepElement.getAttribute('data-tool-name'); + } + + // Inherit from previous sibling (for tool steps) + if (type === 'tool' && stepElement) { + let prev = stepElement.previousElementSibling; + while (prev) { + if (prev.hasAttribute('data-tool-name')) { + return prev.getAttribute('data-tool-name'); + } + prev = prev.previousElementSibling; + } + } + + return null; +} -// Simplified implementation - no complex interactions needed +/** + * Update status badge text content + */ +function updateBadgeText(badge, newCode) { + if (!badge) return; + badge.textContent = newCode; +} + +// Process types that should be grouped into collapsible sections +const PROCESS_TYPES = ['agent', 'tool', 'code_exe', 'browser', 'progress', 'info', 'hint', 'util', 'warning']; +// Main types that should always be visible (not collapsed) +const MAIN_TYPES = ['user', 'response', 'error', 'rate_limit']; -export function setMessage(id, type, heading, content, temp, kvps = null) { +export function setMessage(id, type, heading, content, temp, kvps = null, timestamp = null, durationMs = null, agentNumber = 0) { + // Check if this is a process type message + const isProcessType = PROCESS_TYPES.includes(type); + const isMainType = MAIN_TYPES.includes(type); + // Search for the existing message container by id let messageContainer = document.getElementById(`message-${id}`); + let processStepElement = document.getElementById(`process-step-${id}`); let isNewMessage = false; - if (messageContainer) { - // Don't clear innerHTML - we'll do incremental updates - // messageContainer.innerHTML = ""; - } else { + // For user messages, close current process group FIRST (start fresh for next interaction) + if (type === "user") { + currentProcessGroup = null; + currentDelegationSteps = {}; // Clear delegation tracking + } + + // For process types, check if we should add to process group + if (isProcessType) { + if (processStepElement) { + // Update existing process step + updateProcessStep(processStepElement, id, type, heading, content, kvps, durationMs, agentNumber); + return processStepElement; + } + + // Create or get process group for current interaction + if (!currentProcessGroup || !document.getElementById(currentProcessGroup.id)) { + currentProcessGroup = createProcessGroup(id); + chatHistory.appendChild(currentProcessGroup); + } + + // Add step to current process group + processStepElement = addProcessStep(currentProcessGroup, id, type, heading, content, kvps, timestamp, durationMs, agentNumber); + return processStepElement; + } + + // For subordinate agent responses (A1, A2, ...), treat as a process step instead of main response + // agentNumber: 0 = main agent, 1+ = subordinate agents + // Note: subordinate "response" is a completion marker with content + if (type === "response" && agentNumber !== 0) { + if (processStepElement) { + 
updateProcessStep(processStepElement, id, "response", heading, content, kvps, durationMs, agentNumber); + return processStepElement; + } + + // Create or get process group for current interaction + if (!currentProcessGroup || !document.getElementById(currentProcessGroup.id)) { + currentProcessGroup = createProcessGroup(id); + chatHistory.appendChild(currentProcessGroup); + } + + // Add subordinate response as a response step (special type to show content) + processStepElement = addProcessStep(currentProcessGroup, id, "response", heading, content, kvps, timestamp, durationMs, agentNumber); + return processStepElement; + } + + // For main agent (A0) response, embed the current process group and mark as complete + if (type === "response" && currentProcessGroup) { + const processGroupToEmbed = currentProcessGroup; + // Keep currentProcessGroup reference - subsequent process messages go to same group + + // Mark process group as complete (END state) + markProcessGroupComplete(processGroupToEmbed, heading); + + if (!messageContainer) { + // Create new container with embedded process group + messageContainer = createResponseContainerWithProcessGroup(id, processGroupToEmbed); + isNewMessage = true; + } else { + // Check if already embedded + const existingEmbedded = messageContainer.querySelector(".process-group"); + if (!existingEmbedded && processGroupToEmbed) { + embedProcessGroup(messageContainer, processGroupToEmbed); + } + } + } + + if (!messageContainer) { // Create a new container if not found isNewMessage = true; const sender = type === "user" ? "user" : "ai"; @@ -46,7 +157,8 @@ export function setMessage(id, type, heading, content, temp, kvps = null) { }; //force new group on these types const groupStart = { - agent: true, + response: true, // response starts a new group + user: true, // user message starts a new group (each user message should be separate) // anything else is false }; @@ -134,7 +246,7 @@ export function _drawMessage( // Update message classes messageDiv.className = `message ${mainClass} ${messageClasses.join(" ")}`; - // Handle heading + // Handle heading (important for error/rate_limit messages that show context) if (heading) { let headingElement = messageDiv.querySelector(".msg-heading"); if (!headingElement) { @@ -150,19 +262,6 @@ export function _drawMessage( } headingH4.innerHTML = convertIcons(escapeHTML(heading)); - if (resizeBtns) { - let minMaxBtn = headingElement.querySelector(".msg-min-max-btns"); - if (!minMaxBtn) { - minMaxBtn = document.createElement("div"); - minMaxBtn.classList.add("msg-min-max-btns"); - minMaxBtn.innerHTML = ` - - - `; - headingElement.appendChild(minMaxBtn); - } - } - } else { // Remove heading if it exists but heading is null const existingHeading = messageDiv.querySelector(".msg-heading"); if (existingHeading) { @@ -413,14 +512,11 @@ export function drawMessageUser( messageDiv.className = "message message-user"; } - // Handle heading + // Remove heading element if it exists (user messages no longer show label per target design) let headingElement = messageDiv.querySelector(".msg-heading"); - if (!headingElement) { - headingElement = document.createElement("h4"); - headingElement.classList.add("msg-heading"); - messageDiv.insertBefore(headingElement, messageDiv.firstChild); + if (headingElement) { + headingElement.remove(); } - headingElement.innerHTML = `${heading} person`; // Handle content let textDiv = messageDiv.querySelector(".message-text"); @@ -695,7 +791,9 @@ function drawKvps(container, kvps, latex) { for (let [key, value] 
of Object.entries(kvps)) { const row = table.insertRow(); row.classList.add("kvps-row"); - if (key === "thoughts" || key === "reasoning") + // Skip reasoning + if (key === "reasoning") continue; + if (key === "thoughts") // TODO: find a better way to determine special class assignment row.classList.add("msg-thoughts"); @@ -773,7 +871,8 @@ function drawKvpsIncremental(container, kvps, latex) { // Get all current rows for comparison let existingRows = table.querySelectorAll(".kvps-row"); - const kvpEntries = Object.entries(kvps); + // Filter out reasoning + const kvpEntries = Object.entries(kvps).filter(([key]) => key !== "reasoning"); // Update or create rows as needed kvpEntries.forEach(([key, value], index) => { @@ -787,7 +886,7 @@ function drawKvpsIncremental(container, kvps, latex) { // Update row classes row.className = "kvps-row"; - if (key === "thoughts" || key === "reasoning") { + if (key === "thoughts") { row.classList.add("msg-thoughts"); } @@ -852,7 +951,7 @@ function drawKvpsIncremental(container, kvps, latex) { // Add click handler and cursor change imgElement.style.cursor = "pointer"; imgElement.addEventListener("click", () => { - openImageModal(imgElement.src, 1000); + imageViewerStore.open(imgElement.src, { refreshInterval: 1000 }); }); } else { const pre = document.createElement("pre"); @@ -895,6 +994,22 @@ function convertToTitleCase(str) { }); } +/** + * Clean text value by removing standalone bracket lines and trimming + * Handles both strings and arrays (filters out bracket-only items) + */ +function cleanTextValue(value) { + if (Array.isArray(value)) { + return value + .filter(item => item && String(item).trim() && !/^[\[\]]$/.test(String(item).trim())) + .join("\n"); + } + if (typeof value === "object" && value !== null) { + return JSON.stringify(value, null, 2); + } + return String(value).replace(/^\s*[\[\]]\s*$/gm, "").trim(); +} + function convertImageTags(content) { // Regular expression to match tags and extract base64 content const imageTagRegex = /(.*?)<\/image>/g; @@ -1007,4 +1122,841 @@ class Scroller { reApplyScroll() { if (this.wasAtBottom) this.element.scrollTop = this.element.scrollHeight; } -} \ No newline at end of file +} + +// ============================================ +// Process Group Embedding Functions +// ============================================ + +/** + * Create a response container with an embedded process group + */ +function createResponseContainerWithProcessGroup(id, processGroup) { + const messageContainer = document.createElement("div"); + messageContainer.id = `message-${id}`; + messageContainer.classList.add("message-container", "ai-container", "has-process-group"); + + // Move process group from chatHistory into the container + if (processGroup && processGroup.parentNode) { + processGroup.parentNode.removeChild(processGroup); + } + + // Process group will be the first child + if (processGroup) { + processGroup.classList.add("embedded"); + messageContainer.appendChild(processGroup); + } + + return messageContainer; +} + +/** + * Embed a process group into an existing message container + */ +function embedProcessGroup(messageContainer, processGroup) { + if (!messageContainer || !processGroup) return; + + // Remove from current parent + if (processGroup.parentNode) { + processGroup.parentNode.removeChild(processGroup); + } + + // Add embedded class + processGroup.classList.add("embedded"); + messageContainer.classList.add("has-process-group"); + + // Insert at the beginning of the container + const firstChild = 
messageContainer.firstChild; + if (firstChild) { + messageContainer.insertBefore(processGroup, firstChild); + } else { + messageContainer.appendChild(processGroup); + } +} + +// ============================================ +// Process Group Functions +// ============================================ + +/** + * Create a new collapsible process group + */ +function createProcessGroup(id) { + const groupId = `process-group-${id}`; + const group = document.createElement("div"); + group.id = groupId; + group.classList.add("process-group"); + group.setAttribute("data-group-id", groupId); + + // Check initial expansion state from store (respects user preference) + const initiallyExpanded = processGroupStore.isGroupExpanded(groupId); + if (initiallyExpanded) { + group.classList.add('expanded'); + } + + // Create header + const header = document.createElement("div"); + header.classList.add("process-group-header"); + header.innerHTML = ` + + Processing... + GEN + + schedule--:-- + footprint0 + timer0s + + `; + + // Add click handler for expansion + header.addEventListener("click", (e) => { + processGroupStore.toggleGroup(groupId); + const newState = processGroupStore.isGroupExpanded(groupId); + group.classList.toggle("expanded", newState); + }); + + group.appendChild(header); + + // Create content container + const content = document.createElement("div"); + content.classList.add("process-group-content"); + + // Create steps container + const steps = document.createElement("div"); + steps.classList.add("process-steps"); + content.appendChild(steps); + + group.appendChild(content); + + return group; +} + +/** + * Create or get nested container within a parent step + */ +function getNestedContainer(parentStep) { + let nestedContainer = parentStep.querySelector(".process-nested-container"); + + if (!nestedContainer) { + // Create new container + nestedContainer = document.createElement("div"); + nestedContainer.classList.add("process-nested-container"); + + // Create inner wrapper for animation support + const innerWrapper = document.createElement("div"); + innerWrapper.classList.add("process-nested-inner"); + nestedContainer.appendChild(innerWrapper); + + parentStep.appendChild(nestedContainer); + parentStep.classList.add("has-nested-steps"); + } + + // Return the inner wrapper for appending steps + const innerWrapper = nestedContainer.querySelector(".process-nested-inner"); + return innerWrapper || nestedContainer; // Fallback to container if wrapper missing +} + +/** + * Add a step to a process group + */ +function addProcessStep(group, id, type, heading, content, kvps, timestamp = null, durationMs = null, agentNumber = 0) { + const groupId = group.getAttribute("data-group-id"); + let stepsContainer = group.querySelector(".process-steps"); + const isGroupCompleted = group.classList.contains("process-group-completed"); + + // Create step element + const step = document.createElement("div"); + step.id = `process-step-${id}`; + step.classList.add("process-step"); + step.setAttribute("data-type", type); + step.setAttribute("data-step-id", id); + step.setAttribute("data-agent-number", agentNumber); + + // Resolve tool name (direct, inherited, or null) + // For new steps, pass null as stepElement - inheritance uses stepsContainer query + let toolNameToUse = kvps?.tool_name; + if (type === 'tool' && !toolNameToUse) { + const existingSteps = stepsContainer.querySelectorAll('.process-step[data-tool-name]'); + if (existingSteps.length > 0) { + toolNameToUse = existingSteps[existingSteps.length - 
1].getAttribute("data-tool-name"); + } + } + if (toolNameToUse) { + step.setAttribute("data-tool-name", toolNameToUse); + } + + // Store timestamp for duration calculation + if (timestamp) { + step.setAttribute("data-timestamp", timestamp); + + // Set group start time from first step + if (!group.getAttribute("data-start-timestamp")) { + group.setAttribute("data-start-timestamp", timestamp); + // Update header with formatted datetime + const timestampEl = group.querySelector(".group-timestamp"); + if (timestampEl) { + timestampEl.textContent = formatDateTime(timestamp); + } + } + } + + // Store duration from backend (used for final duration calculation) + if (durationMs != null) { + step.setAttribute("data-duration-ms", durationMs); + } + + // Add message-util class for utility/info types (controlled by showUtils preference) + if (type === "util" || type === "info" || type === "hint") { + step.classList.add("message-util"); + // Apply current preference state + if (preferencesStore.showUtils) { + step.classList.add("show-util"); + } + } + + // Get step info from heading (single source of truth: backend) + const title = getStepTitle(heading, kvps, type); + + // Check if step should be expanded + // Warning/error steps auto-expand to show content + const isStepExpanded = processGroupStore.isStepExpanded(groupId, id) || + (type === "warning" || type === "error"); + if (isStepExpanded) { + step.classList.add("step-expanded"); + } + + // Create step header + const stepHeader = document.createElement("div"); + stepHeader.classList.add("process-step-header"); + + // Status code and color class from store (maps backend types) + const statusCode = processGroupStore.getStepCode(type, toolNameToUse); + const statusColorClass = processGroupStore.getStatusColorClass(type, toolNameToUse); + + // Add status color class to step for cascading --step-accent to internal icons + step.classList.add(statusColorClass); + + const activeClass = isGroupCompleted ? 
"" : " status-active"; + stepHeader.innerHTML = ` + + ${statusCode} + ${escapeHTML(title)} + `; + + // Add click handler for step expansion + stepHeader.addEventListener("click", (e) => { + e.stopPropagation(); + processGroupStore.toggleStep(groupId, id); + const newState = processGroupStore.isStepExpanded(groupId, id); + // Explicitly add or remove the class based on state + if (newState) { + step.classList.add("step-expanded"); + } else { + step.classList.remove("step-expanded"); + } + }); + + step.appendChild(stepHeader); + + // Create step detail container + const detail = document.createElement("div"); + detail.classList.add("process-step-detail"); + + const detailContent = document.createElement("div"); + detailContent.classList.add("process-step-detail-content"); + + // Add content to detail + renderStepDetailContent(detailContent, content, kvps, type); + + detail.appendChild(detailContent); + step.appendChild(detail); + + // Track delegation steps for nesting + if (toolNameToUse === "call_subordinate") { + currentDelegationSteps[agentNumber] = step; + } + + // Determine where to append the step (main list or nested in parent) + let appendTarget = stepsContainer; + + // Check if this step belongs to a subordinate agent + if (agentNumber > 0 && currentDelegationSteps[agentNumber - 1]) { + const parentStep = currentDelegationSteps[agentNumber - 1]; + appendTarget = getNestedContainer(parentStep); + step.classList.add("nested-step"); + + // Auto-expand parent if this nested step is a warning/error + if (type === "warning" || type === "error") { + parentStep.classList.add("step-expanded"); + } + } + + // Remove status-active from all previous steps (only the current step is active) + const prevSteps = stepsContainer.querySelectorAll(".process-step .status-badge.status-active"); + prevSteps.forEach(badge => badge.classList.remove("status-active")); + + appendTarget.appendChild(step); + + // Update group header + updateProcessGroupHeader(group); + + return step; +} + +/** + * Update an existing process step + */ +function updateProcessStep(stepElement, id, type, heading, content, kvps, durationMs = null, agentNumber = 0) { + // Update title + const titleEl = stepElement.querySelector(".step-title"); + if (titleEl) { + const title = getStepTitle(heading, kvps, type); + titleEl.textContent = title; + } + + // Update duration from backend + if (durationMs != null) { + stepElement.setAttribute("data-duration-ms", durationMs); + } + + // Update agent number if provided + if (agentNumber !== undefined) { + stepElement.setAttribute("data-agent-number", agentNumber); + } + + // Resolve and update tool name + badge + const toolNameToUse = resolveToolName(type, kvps, stepElement); + if (toolNameToUse) { + stepElement.setAttribute("data-tool-name", toolNameToUse); + const newCode = processGroupStore.getStepCode(type, toolNameToUse); + updateBadgeText(stepElement.querySelector(".status-badge"), newCode); + } + + // Update detail content + const detailContent = stepElement.querySelector(".process-step-detail-content"); + let skipFullRender = false; + + if (detailContent) { + // For browser, update image src incrementally to avoid flashing + if (type === "browser" && kvps?.screenshot) { + const existingImg = detailContent.querySelector(".screenshot-img"); + const newSrc = kvps.screenshot.replace("img://", "/image_get?path="); + if (existingImg) { + // Only update if src actually changed + if (!existingImg.src.endsWith(newSrc.split("?path=")[1])) { + existingImg.src = newSrc; + } + // Skip full 
re-render to avoid flashing, but still update group header + skipFullRender = true; + } + } + + if (!skipFullRender) { + renderStepDetailContent(detailContent, content, kvps, type); + } + } + + // Update parent group header + const group = stepElement.closest(".process-group"); + if (group) { + updateProcessGroupHeader(group); + } +} + +/** + * Get a concise title for a process step + */ +function getStepTitle(heading, kvps, type) { + // Try to get a meaningful title from heading or kvps + if (heading && heading.trim()) { + return cleanStepTitle(heading, 80); + } + + // For warnings/errors without heading, use content preview as title + if ((type === "warning" || type === "error")) { + // We'll show full content in detail, so just use type as title + return type === "warning" ? "Warning" : "Error"; + } + + if (kvps) { + // Try common fields for title + if (kvps.tool_name) { + const headline = kvps.headline ? cleanStepTitle(kvps.headline, 60) : ''; + return `${kvps.tool_name}${headline ? ': ' + headline : ''}`; + } + if (kvps.headline) { + return cleanStepTitle(kvps.headline, 80); + } + if (kvps.query) { + return truncateText(kvps.query, 80); + } + if (kvps.thoughts) { + return truncateText(String(kvps.thoughts), 80); + } + } + + // Fallback: capitalize type (backend is source of truth) + return type ? type.charAt(0).toUpperCase() + type.slice(1).replace(/_/g, ' ') : 'Process'; +} + +/** + * Extract icon name from heading with icon:// prefix + * Returns the icon name (e.g., "terminal") or null if no prefix found + */ +function extractIconFromHeading(heading) { + if (!heading) return null; + const match = String(heading).match(/^icon:\/\/([a-zA-Z0-9_]+)/); + return match ? match[1] : null; +} + +/** + * Clean step title by removing icon:// prefixes and status phrases + * Preserves agent markers (A1:, A2:, etc.) 
so users can see which subordinate agent is executing + */ +function cleanStepTitle(text, maxLength) { + if (!text) return ""; + let cleaned = String(text); + + // Remove icon:// patterns (e.g., "icon://network_intelligence") + cleaned = cleaned.replace(/icon:\/\/[a-zA-Z0-9_]+\s*/g, ""); + + // Trim whitespace + cleaned = cleaned.trim(); + + return truncateText(cleaned, maxLength); +} + +/** + * Render content for step detail panel + */ +function renderStepDetailContent(container, content, kvps, type = null) { + container.innerHTML = ""; + + // Special handling for response type - show content as markdown (for subordinate responses) + if (type === "response" && content && content.trim()) { + const responseDiv = document.createElement("div"); + responseDiv.classList.add("step-response-content"); + + // Parse markdown + let processedContent = content; + processedContent = convertImageTags(processedContent); + processedContent = convertImgFilePaths(processedContent); + processedContent = marked.parse(processedContent, { breaks: true }); + processedContent = convertPathsToLinks(processedContent); + processedContent = addBlankTargetsToLinks(processedContent); + + responseDiv.innerHTML = processedContent; + container.appendChild(responseDiv); + return; + } + + // Special handling for warning/error types - always show content prominently + if ((type === "warning" || type === "error") && content && content.trim()) { + const warningDiv = document.createElement("div"); + warningDiv.classList.add("step-warning-content"); + warningDiv.textContent = content; + container.appendChild(warningDiv); + // Don't return - also show kvps if present + } + + // Special handling for code_exe type - render as terminal-style output + if (type === "code_exe" && kvps) { + const runtime = kvps.runtime || kvps.Runtime || "bash"; + const code = kvps.code || kvps.Code || ""; + const output = content || ""; + + if (code || output) { + const terminalDiv = document.createElement("div"); + terminalDiv.classList.add("step-terminal"); + + // Show output if present + if (output && output.trim()) { + const outputPre = document.createElement("pre"); + outputPre.classList.add("terminal-output"); + outputPre.textContent = truncateText(output, 1000); + terminalDiv.appendChild(outputPre); + } + + container.appendChild(terminalDiv); + } + + // Still render thoughts if present (but not reasoning - that's native model thinking, not structured output) + if (kvps.thoughts || kvps.thinking) { + const thoughtKey = kvps.thoughts ? 
"thoughts" : "thinking"; + const thoughtValue = kvps[thoughtKey]; + renderThoughts(container, thoughtValue); + } + + return; + } + + // Add KVPs if present + if (kvps && Object.keys(kvps).length > 0) { + const kvpsDiv = document.createElement("div"); + kvpsDiv.classList.add("step-kvps"); + + for (const [key, value] of Object.entries(kvps)) { + // Skip internal/display keys + if (key === "finished" || key === "attachments") continue; + + // Skip code_exe specific keys that we handle specially above + if (type === "code_exe" && (key.toLowerCase() === "runtime" || key.toLowerCase() === "session" || key.toLowerCase() === "code")) { + continue; + } + + const lowerKey = key.toLowerCase(); + + // Skip headline and tool_name - they're shown elsewhere + if (lowerKey === "headline" || lowerKey === "tool_name") continue; + + // Skip query in agent steps - it's shown in the tool call step + if (type === "agent" && lowerKey === "query") continue; + + // Special handling for thoughts - render with single lightbulb icon + // Skip reasoning + if (lowerKey === "reasoning") continue; + if (lowerKey === "thoughts" || lowerKey === "thinking" || lowerKey === "reflection") { + renderThoughts(kvpsDiv, value); + continue; + } + + // Special handling for tool_args - render only for tool/mcp types (skip for agent) + if (lowerKey === "tool_args") { + // Skip tool_args for agent steps - it's shown in the tool call step + if (type === "agent") continue; + + if (typeof value !== "object" || value === null) continue; + const argsDiv = document.createElement("div"); + argsDiv.classList.add("step-tool-args"); + + // Icon mapping for common tool arguments + const argIcons = { + 'query': 'search', + 'url': 'link', + 'path': 'folder', + 'file': 'description', + 'code': 'code', + 'command': 'terminal', + 'message': 'chat', + 'text': 'notes', + 'content': 'article', + 'name': 'label', + 'id': 'tag', + 'type': 'category', + 'document': 'description', + 'documents': 'folder_open', + 'queries': 'search' + }; + + for (const [argKey, argValue] of Object.entries(value)) { + const argRow = document.createElement("div"); + argRow.classList.add("tool-arg-row"); + + const argLabel = document.createElement("span"); + argLabel.classList.add("tool-arg-label"); + + // Use icon if available, otherwise use text label + const lowerArgKey = argKey.toLowerCase(); + if (argIcons[lowerArgKey]) { + argLabel.innerHTML = `${argIcons[lowerArgKey]}`; + } else { + argLabel.textContent = convertToTitleCase(argKey) + ":"; + } + + const argVal = document.createElement("span"); + argVal.classList.add("tool-arg-value"); + + const argText = cleanTextValue(argValue); + + argVal.textContent = truncateText(argText, 300); + + argRow.appendChild(argLabel); + argRow.appendChild(argVal); + argsDiv.appendChild(argRow); + } + + kvpsDiv.appendChild(argsDiv); + continue; + } + + const kvpDiv = document.createElement("div"); + kvpDiv.classList.add("step-kvp"); + + const keySpan = document.createElement("span"); + keySpan.classList.add("step-kvp-key"); + + // Icon mapping for common kvp keys + const kvpIcons = { + 'query': 'search', + 'url': 'link', + 'path': 'folder', + 'file': 'description', + 'code': 'code', + 'command': 'terminal', + 'message': 'chat', + 'text': 'notes', + 'content': 'article', + 'name': 'label', + 'id': 'tag', + 'type': 'category', + 'runtime': 'memory', + 'result': 'output', + 'progress': 'pending', + 'document': 'description', + 'documents': 'folder_open', + 'queries': 'search', + 'screenshot': 'image' + }; + + // lowerKey already defined above + 
if (kvpIcons[lowerKey]) { + keySpan.innerHTML = `${kvpIcons[lowerKey]}`; + } else { + keySpan.textContent = convertToTitleCase(key) + ":"; + } + + const valueSpan = document.createElement("div"); + valueSpan.classList.add("step-kvp-value"); + + if (typeof value === "string" && value.startsWith("img://")) { + const imgElement = document.createElement("img"); + imgElement.classList.add("screenshot-img"); + imgElement.src = value.replace("img://", "/image_get?path="); + imgElement.alt = "Image Attachment"; + imgElement.style.cursor = "pointer"; + imgElement.style.maxWidth = "100%"; + imgElement.style.display = "block"; + imgElement.style.marginTop = "4px"; + + // Add click handler and cursor change + imgElement.addEventListener("click", () => { + imageViewerStore.open(imgElement.src, { name: "Image Attachment" }); + }); + + valueSpan.appendChild(imgElement); + } else { + const valueText = cleanTextValue(value); + valueSpan.textContent = truncateText(valueText, 1000); + } + + kvpDiv.appendChild(keySpan); + kvpDiv.appendChild(valueSpan); + kvpsDiv.appendChild(kvpDiv); + } + + container.appendChild(kvpsDiv); + } + + // Add main content if present (JSON content) + if (content && content.trim()) { + const pre = document.createElement("pre"); + pre.classList.add("msg-json"); + pre.textContent = truncateText(content, 1000); + container.appendChild(pre); + } +} + +/** + * Helper to render thoughts/reasoning with lightbulb icon + */ +function renderThoughts(container, value) { + const thoughtsDiv = document.createElement("div"); + thoughtsDiv.classList.add("step-thoughts", "msg-thoughts"); + + const thoughtText = cleanTextValue(value); + + if (thoughtText) { + thoughtsDiv.innerHTML = `lightbulb${escapeHTML(thoughtText)}`; + container.appendChild(thoughtsDiv); + } +} + +/** + * Update process group header with step count, status, and metrics + */ +function updateProcessGroupHeader(group) { + const steps = group.querySelectorAll(".process-step"); + const titleEl = group.querySelector(".group-title"); + const statusEl = group.querySelector(".group-status"); + const metricsEl = group.querySelector(".group-metrics"); + const isCompleted = group.classList.contains("process-group-completed"); + + // If completed, only remove active badges and exit early (don't update metrics) + if (isCompleted) { + const activeBadges = group.querySelectorAll(".status-badge.status-active"); + activeBadges.forEach(badge => badge.classList.remove("status-active")); + return; + } + + // Update group title with the latest agent step heading + if (titleEl) { + // Find the last "agent" type step + const agentSteps = Array.from(steps).filter(step => step.getAttribute("data-type") === "agent"); + if (agentSteps.length > 0) { + const lastAgentStep = agentSteps[agentSteps.length - 1]; + const lastHeading = lastAgentStep.querySelector(".step-title")?.textContent; + if (lastHeading) { + const cleanTitle = cleanStepTitle(lastHeading, 50); + if (cleanTitle) { + titleEl.textContent = cleanTitle; + } + } + } + } + + // Update step count in metrics + const stepsMetricEl = metricsEl?.querySelector(".metric-steps .metric-value"); + if (stepsMetricEl) { + stepsMetricEl.textContent = steps.length.toString(); + } + + // Update time metric + const timeMetricEl = metricsEl?.querySelector(".metric-time .metric-value"); + const startTimestamp = group.getAttribute("data-start-timestamp"); + if (timeMetricEl && startTimestamp) { + const date = new Date(parseFloat(startTimestamp) * 1000); + const hours = String(date.getHours()).padStart(2, "0"); + const 
minutes = String(date.getMinutes()).padStart(2, "0"); + timeMetricEl.textContent = `${hours}:${minutes}`; + } + + // Update duration metric + const durationMetricEl = metricsEl?.querySelector(".metric-duration .metric-value"); + if (durationMetricEl && steps.length > 0) { + // Calculate accumulated duration from backend data + let accumulatedMs = 0; + steps.forEach(step => { + accumulatedMs += parseInt(step.getAttribute("data-duration-ms") || "0", 10); + }); + + // Check if last step is still in progress (no duration_ms set yet) + const lastStep = steps[steps.length - 1]; + const lastStepDuration = lastStep.getAttribute("data-duration-ms"); + const lastStepTimestamp = lastStep.getAttribute("data-timestamp"); + + if (lastStepDuration == null && lastStepTimestamp) { + // Last step is in progress - add live elapsed time for this step only + const lastStepStartMs = parseFloat(lastStepTimestamp) * 1000; + const liveElapsedMs = Math.max(0, Date.now() - lastStepStartMs); + accumulatedMs += liveElapsedMs; + } + + durationMetricEl.textContent = formatDuration(accumulatedMs); + } + + if (steps.length > 0) { + // Get the last step's type for status + const lastStep = steps[steps.length - 1]; + const lastType = lastStep.getAttribute("data-type"); + const lastToolName = lastStep.getAttribute("data-tool-name"); + const lastTitle = lastStep.querySelector(".step-title")?.textContent || ""; + + // Update status badge (keep status-active during execution) + if (statusEl) { + // Status code and color class from store (maps backend types) + const statusCode = processGroupStore.getStepCode(lastType, lastToolName); + const statusColorClass = processGroupStore.getStatusColorClass(lastType, lastToolName); + + statusEl.textContent = statusCode; + statusEl.className = `status-badge ${statusColorClass} status-active group-status`; + } + + // Update title + if (titleEl) { + // Prefer agent type steps for the group title as they contain thinking/planning info + if (lastType === "agent" && lastTitle) { + titleEl.textContent = cleanStepTitle(lastTitle, 50); + } else { + // Try to find the most recent agent step for a better title + const agentSteps = group.querySelectorAll('.process-step[data-type="agent"]'); + if (agentSteps.length > 0) { + const lastAgentStep = agentSteps[agentSteps.length - 1]; + const agentTitle = lastAgentStep.querySelector(".step-title")?.textContent || ""; + if (agentTitle) { + titleEl.textContent = cleanStepTitle(agentTitle, 50); + return; + } + } + titleEl.textContent = cleanStepTitle(lastTitle, 50) || `Processing...`; + } + } + } +} + +/** + * Truncate text to a maximum length + */ +function truncateText(text, maxLength) { + if (!text) return ""; + text = String(text).trim(); + if (text.length <= maxLength) return text; + return text.substring(0, maxLength - 3) + "..."; +} + +/** + * Mark a process group as complete (END state) + */ +function markProcessGroupComplete(group, responseTitle) { + if (!group) return; + + // Update status badge to END (remove status-active) + const statusEl = group.querySelector(".group-status"); + if (statusEl) { + statusEl.innerHTML = 'checkEND'; + statusEl.className = "status-badge status-end group-status"; // No status-active + } + + // Remove status-active from all step badges (stop spinners) + const stepBadges = group.querySelectorAll(".process-step .status-badge.status-active"); + stepBadges.forEach(badge => badge.classList.remove("status-active")); + + // Update title if response title is available + const titleEl = group.querySelector(".group-title"); + if 
(titleEl && responseTitle) { + const cleanTitle = cleanStepTitle(responseTitle, 50); + if (cleanTitle) { + titleEl.textContent = cleanTitle; + } + } + + // Add completed class to group + group.classList.add("process-group-completed"); + + // Calculate final duration from backend data (sum of all step durations) + const steps = group.querySelectorAll(".process-step"); + let totalDurationMs = 0; + steps.forEach(step => { + const durationMs = parseInt(step.getAttribute("data-duration-ms") || "0", 10); + totalDurationMs += durationMs; + }); + + // Update duration metric with final value from backend + const metricsEl = group.querySelector(".group-metrics"); + const durationMetricEl = metricsEl?.querySelector(".metric-duration .metric-value"); + if (durationMetricEl && totalDurationMs > 0) { + durationMetricEl.textContent = formatDuration(totalDurationMs); + } +} + +/** + * Reset process group state (called on context switch) + */ +export function resetProcessGroups() { + currentProcessGroup = null; + currentDelegationSteps = {}; + messageGroup = null; +} + +/** + * Format Unix timestamp as date-time string (YYYY-MM-DD HH:MM:SS) + */ +function formatDateTime(timestamp) { + const date = new Date(timestamp * 1000); // Convert seconds to milliseconds + const year = date.getFullYear(); + const month = String(date.getMonth() + 1).padStart(2, "0"); + const day = String(date.getDate()).padStart(2, "0"); + const hours = String(date.getHours()).padStart(2, "0"); + const minutes = String(date.getMinutes()).padStart(2, "0"); + const seconds = String(date.getSeconds()).padStart(2, "0"); + return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`; +} diff --git a/webui/js/modal.js b/webui/js/modal.js deleted file mode 100644 index 62438d0190..0000000000 --- a/webui/js/modal.js +++ /dev/null @@ -1,131 +0,0 @@ -const fullScreenInputModalProxy = { - isOpen: false, - inputText: '', - wordWrap: true, - undoStack: [], - redoStack: [], - maxStackSize: 100, - lastSavedState: '', - - openModal() { - const chatInput = document.getElementById('chat-input'); - this.inputText = chatInput.value; - this.lastSavedState = this.inputText; - this.isOpen = true; - this.undoStack = []; - this.redoStack = []; - - // Focus the full screen input after a short delay to ensure the modal is rendered - setTimeout(() => { - const fullScreenInput = document.getElementById('full-screen-input'); - fullScreenInput.focus(); - }, 100); - }, - - handleClose() { - const chatInput = document.getElementById('chat-input'); - chatInput.value = this.inputText; - chatInput.dispatchEvent(new Event('input')); // Trigger input event for textarea auto-resize - this.isOpen = false; - }, - - updateHistory() { - // Don't save if the text hasn't changed - if (this.lastSavedState === this.inputText) return; - - this.undoStack.push(this.lastSavedState); - if (this.undoStack.length > this.maxStackSize) { - this.undoStack.shift(); - } - this.redoStack = []; - this.lastSavedState = this.inputText; - }, - - undo() { - if (!this.canUndo) return; - - this.redoStack.push(this.inputText); - this.inputText = this.undoStack.pop(); - this.lastSavedState = this.inputText; - }, - - redo() { - if (!this.canRedo) return; - - this.undoStack.push(this.inputText); - this.inputText = this.redoStack.pop(); - this.lastSavedState = this.inputText; - }, - - clearText() { - if (this.inputText) { - this.updateHistory(); // Save current state before clearing - this.inputText = ''; - this.lastSavedState = ''; - } - }, - - toggleWrap() { - this.wordWrap = !this.wordWrap; - }, - - 
get canUndo() { - return this.undoStack.length > 0; - }, - - get canRedo() { - return this.redoStack.length > 0; - } -}; - -// Register the full screen input modal with Alpine as a store -document.addEventListener('alpine:init', () => { - Alpine.store('fullScreenInputModal', fullScreenInputModalProxy); -}); - -// Also register as a component for x-data usage -document.addEventListener('alpine:init', () => { - Alpine.data('fullScreenInputModalProxy', () => fullScreenInputModalProxy); -}); - -const genericModalProxy = { - isOpen: false, - isLoading: false, - title: '', - description: '', - html: '', - - async openModal(title, description, html, contentClasses = []) { - const modalEl = document.getElementById('genericModal'); - const modalContent = document.getElementById('viewer'); - const modalAD = Alpine.$data(modalEl); - - modalAD.isOpen = true; - modalAD.title = title - modalAD.description = description - modalAD.html = html - - modalContent.className = 'modal-content'; - modalContent.classList.add(...contentClasses); - }, - - handleClose() { - this.isOpen = false; - } -} - -// Wait for Alpine to be ready -document.addEventListener('alpine:init', () => { - Alpine.data('genericModalProxy', () => ({ - init() { - Object.assign(this, genericModalProxy); - // Ensure immediate file fetch when modal opens - this.$watch('isOpen', async (value) => { - // what now? - }); - } - })); -}); - -// Keep the global assignment for backward compatibility -window.genericModalProxy = genericModalProxy; \ No newline at end of file diff --git a/webui/js/modals.js b/webui/js/modals.js index 6c13728820..b147a25f75 100644 --- a/webui/js/modals.js +++ b/webui/js/modals.js @@ -9,13 +9,6 @@ const backdrop = document.createElement("div"); backdrop.className = "modal-backdrop"; backdrop.style.display = "none"; backdrop.style.backdropFilter = "blur(5px)"; - -// Make sure we only close when clicking directly on the backdrop, not its children -backdrop.addEventListener("click", (event) => { - if (event.target === backdrop) { - closeModal(); - } -}); document.body.appendChild(backdrop); // Function to update z-index for all modals and backdrop @@ -49,18 +42,22 @@ function updateModalZIndexes() { } // Function to create a new modal element -function createModalElement(name) { +function createModalElement(path) { // Create modal element const newModal = document.createElement("div"); newModal.className = "modal"; - newModal.modalName = name; // save name to the object + newModal.path = path; // save name to the object - // Add click handler to the modal element to close when clicking outside content - newModal.addEventListener("click", (event) => { - // Only close if clicking directly on the modal container, not its content - if (event.target === newModal) { + // Add click handlers to only close modal if both mousedown and mouseup are on the modal container + let mouseDownTarget = null; + newModal.addEventListener("mousedown", (event) => { + mouseDownTarget = event.target; + }); + newModal.addEventListener("mouseup", (event) => { + if (event.target === newModal && mouseDownTarget === newModal) { closeModal(); } + mouseDownTarget = null; }); @@ -74,6 +71,7 @@ function createModalElement(name) { +
`; @@ -92,10 +90,13 @@ function createModalElement(name) { updateModalZIndexes(); return { + path: path, element: newModal, title: newModal.querySelector(".modal-title"), body: newModal.querySelector(".modal-bd"), close: close_button, + footerSlot: newModal.querySelector(".modal-footer-slot"), + inner: newModal.querySelector(".modal-inner"), styles: [], scripts: [], }; @@ -106,7 +107,7 @@ export function openModal(modalPath) { return new Promise((resolve) => { try { // Create new modal instance - const modal = createModalElement(); + const modal = createModalElement(modalPath); new MutationObserver( (_, o) => @@ -135,6 +136,18 @@ export function openModal(modalPath) { if (doc.body && doc.body.classList) { modal.body.classList.add(...doc.body.classList); } + + // Some modals have a footer. Check if it exists and move it to footer slot + // Use requestAnimationFrame to let Alpine mount the component first + requestAnimationFrame(() => { + const componentFooter = modal.body.querySelector('[data-modal-footer]'); + if (componentFooter && modal.footerSlot) { + // Move footer outside modal-scroll scrollable area + modal.footerSlot.appendChild(componentFooter); + modal.footerSlot.style.display = 'block'; + modal.inner.classList.add('modal-with-footer'); + } + }); }) .catch((error) => { console.error("Error loading modal content:", error); @@ -143,6 +156,7 @@ export function openModal(modalPath) { // Add modal to stack and show it // Add modal to stack + modal.path = modalPath; modalStack.push(modal); modal.element.classList.add("show"); document.body.style.overflow = "hidden"; @@ -157,15 +171,15 @@ export function openModal(modalPath) { } // Function to close modal -export function closeModal(modalName = null) { +export function closeModal(modalPath = null) { if (modalStack.length === 0) return; let modalIndex = modalStack.length - 1; // Default to last modal let modal; - if (modalName) { + if (modalPath) { // Find the modal with the specified name in the stack - modalIndex = modalStack.findIndex((modal) => modal.modalName === modalName); + modalIndex = modalStack.findIndex((modal) => modal.path === modalPath); if (modalIndex === -1) return; // Modal not found in stack // Get the modal from stack at the found index @@ -188,24 +202,32 @@ export function closeModal(modalName = null) { // First remove the show class to trigger the transition modal.element.classList.remove("show"); - // Remove the modal element from DOM after animation - modal.element.addEventListener( - "transitionend", - () => { - // Make sure the modal is completely removed from the DOM - if (modal.element.parentNode) { - modal.element.parentNode.removeChild(modal.element); - } - }, - { once: true } - ); - - // Fallback in case the transition event doesn't fire - setTimeout(() => { - if (modal.element.parentNode) { - modal.element.parentNode.removeChild(modal.element); - } - }, 500); // 500ms should be enough for the transition to complete + // commented out to prevent race conditions + + // // Remove the modal element from DOM after animation + // modal.element.addEventListener( + // "transitionend", + // () => { + // // Make sure the modal is completely removed from the DOM + // if (modal.element.parentNode) { + // modal.element.parentNode.removeChild(modal.element); + // } + // }, + // { once: true } + // ); + + // // Fallback in case the transition event doesn't fire + // setTimeout(() => { + // if (modal.element.parentNode) { + // modal.element.parentNode.removeChild(modal.element); + // } + // }, 500); // 500ms should be 
enough for the transition to complete + + // remove immediately + if (modal.element.parentNode) { + modal.element.parentNode.removeChild(modal.element); + } + // Handle backdrop visibility and body overflow if (modalStack.length === 0) { diff --git a/webui/js/scheduler.js b/webui/js/scheduler.js deleted file mode 100644 index fbe2ad39cc..0000000000 --- a/webui/js/scheduler.js +++ /dev/null @@ -1,1702 +0,0 @@ -/** - * Task Scheduler Component for Settings Modal - * Manages scheduled and ad-hoc tasks through a dedicated settings tab - */ - -import { formatDateTime, getUserTimezone } from './time-utils.js'; -import { switchFromContext } from '../index.js'; - -// Ensure the showToast function is available -// if (typeof window.showToast !== 'function') { -// window.showToast = function(message, type = 'info') { -// console.log(`[Toast ${type}]: ${message}`); -// // Create toast element if not already present -// let toastContainer = document.getElementById('toast-container'); -// if (!toastContainer) { -// toastContainer = document.createElement('div'); -// toastContainer.id = 'toast-container'; -// toastContainer.style.position = 'fixed'; -// toastContainer.style.bottom = '20px'; -// toastContainer.style.right = '20px'; -// toastContainer.style.zIndex = '9999'; -// document.body.appendChild(toastContainer); -// } - -// // Create the toast -// const toast = document.createElement('div'); -// toast.className = `toast toast-${type}`; -// toast.style.padding = '10px 15px'; -// toast.style.margin = '5px 0'; -// toast.style.backgroundColor = type === 'error' ? '#f44336' : -// type === 'success' ? '#4CAF50' : -// type === 'warning' ? '#ff9800' : '#2196F3'; -// toast.style.color = 'white'; -// toast.style.borderRadius = '4px'; -// toast.style.boxShadow = '0 2px 5px rgba(0,0,0,0.2)'; -// toast.style.width = 'auto'; -// toast.style.maxWidth = '300px'; -// toast.style.wordWrap = 'break-word'; - -// toast.innerHTML = message; - -// // Add to container -// toastContainer.appendChild(toast); - -// // Auto remove after 3 seconds -// setTimeout(() => { -// if (toast.parentNode) { -// toast.style.opacity = '0'; -// toast.style.transition = 'opacity 0.5s ease'; -// setTimeout(() => { -// if (toast.parentNode) { -// toast.parentNode.removeChild(toast); -// } -// }, 500); -// } -// }, 3000); -// }; -// } - -// Add this near the top of the scheduler.js file, outside of any function -const showToast = function(message, type = 'info') { - // Use new frontend notification system - if (window.Alpine && window.Alpine.store && window.Alpine.store('notificationStore')) { - const store = window.Alpine.store('notificationStore'); - switch (type.toLowerCase()) { - case 'error': - return store.frontendError(message, "Scheduler", 5); - case 'success': - return store.frontendInfo(message, "Scheduler", 3); - case 'warning': - return store.frontendWarning(message, "Scheduler", 4); - case 'info': - default: - return store.frontendInfo(message, "Scheduler", 3); - } - } else { - // Fallback to global toast function or console - if (typeof window.toast === 'function') { - window.toast(message, type); - } else { - console.log(`SCHEDULER ${type.toUpperCase()}: ${message}`); - } - } -}; - -// Define the full component implementation -const fullComponentImplementation = function() { - return { - tasks: [], - isLoading: true, - selectedTask: null, - expandedTaskId: null, - sortField: 'name', - sortDirection: 'asc', - filterType: 'all', // all, scheduled, adhoc, planned - filterState: 'all', // all, idle, running, disabled, error - 
pollingInterval: null, - pollingActive: false, // Track if polling is currently active - editingTask: { - name: '', - type: 'scheduled', - state: 'idle', - schedule: { - minute: '*', - hour: '*', - day: '*', - month: '*', - weekday: '*', - timezone: getUserTimezone() - }, - token: '', - plan: { - todo: [], - in_progress: null, - done: [] - }, - system_prompt: '', - prompt: '', - attachments: [] - }, - isCreating: false, - isEditing: false, - showLoadingState: false, - viewMode: 'list', // Controls whether to show list or detail view - selectedTaskForDetail: null, // Task object for detail view - attachmentsText: '', - filteredTasks: [], - hasNoTasks: true, // Add explicit reactive property - - // Initialize the component - init() { - // Initialize component data - this.tasks = []; - this.isLoading = true; - this.hasNoTasks = true; // Add explicit reactive property - this.filterType = 'all'; - this.filterState = 'all'; - this.sortField = 'name'; - this.sortDirection = 'asc'; - this.pollingInterval = null; - this.pollingActive = false; - - // Start polling for tasks - this.startPolling(); - - // Refresh initial data - this.fetchTasks(); - - // Set up event handler for tab selection to ensure view is refreshed when tab becomes visible - document.addEventListener('click', (event) => { - // Check if a tab was clicked - const clickedTab = event.target.closest('.settings-tab'); - if (clickedTab && clickedTab.getAttribute('data-tab') === 'scheduler') { - setTimeout(() => { - this.fetchTasks(); - }, 100); - } - }); - - // Watch for changes to the tasks array to update UI - this.$watch('tasks', (newTasks) => { - this.updateTasksUI(); - }); - - this.$watch('filterType', () => { - this.updateTasksUI(); - }); - - this.$watch('filterState', () => { - this.updateTasksUI(); - }); - - // Set up default configuration - this.viewMode = localStorage.getItem('scheduler_view_mode') || 'list'; - this.selectedTask = null; - this.expandedTaskId = null; - this.editingTask = { - name: '', - type: 'scheduled', - state: 'idle', - schedule: { - minute: '*', - hour: '*', - day: '*', - month: '*', - weekday: '*', - timezone: getUserTimezone() - }, - token: this.generateRandomToken ? 
this.generateRandomToken() : '', - plan: { - todo: [], - in_progress: null, - done: [] - }, - system_prompt: '', - prompt: '', - attachments: [] - }; - - // Initialize Flatpickr for date/time pickers after Alpine is fully initialized - this.$nextTick(() => { - // Wait until DOM is updated - setTimeout(() => { - if (this.isCreating) { - this.initFlatpickr('create'); - } else if (this.isEditing) { - this.initFlatpickr('edit'); - } - }, 100); - }); - - // Cleanup on component destruction - this.$cleanup = () => { - console.log('Cleaning up schedulerSettings component'); - this.stopPolling(); - - // Clean up any Flatpickr instances - const createInput = document.getElementById('newPlannedTime-create'); - if (createInput && createInput._flatpickr) { - createInput._flatpickr.destroy(); - } - - const editInput = document.getElementById('newPlannedTime-edit'); - if (editInput && editInput._flatpickr) { - editInput._flatpickr.destroy(); - } - }; - }, - - // Start polling for task updates - startPolling() { - // Don't start if already polling - if (this.pollingInterval) { - console.log('Polling already active, not starting again'); - return; - } - - console.log('Starting task polling'); - this.pollingActive = true; - - // Fetch immediately, then set up interval for every 2 seconds - this.fetchTasks(); - this.pollingInterval = setInterval(() => { - if (this.pollingActive) { - this.fetchTasks(); - } - }, 2000); // Poll every 2 seconds as requested - }, - - // Stop polling when tab is inactive - stopPolling() { - console.log('Stopping task polling'); - this.pollingActive = false; - - if (this.pollingInterval) { - clearInterval(this.pollingInterval); - this.pollingInterval = null; - } - }, - - // Fetch tasks from API - async fetchTasks() { - // Don't fetch if polling is inactive (prevents race conditions) - if (!this.pollingActive && this.pollingInterval) { - return; - } - - // Don't fetch while creating/editing a task - if (this.isCreating || this.isEditing) { - return; - } - - this.isLoading = true; - try { - const response = await fetchApi('/scheduler_tasks_list', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - timezone: getUserTimezone() - }) - }); - - if (!response.ok) { - throw new Error('Failed to fetch tasks'); - } - - const data = await response.json(); - - // Check if data.tasks exists and is an array - if (!data || !data.tasks) { - console.error('Invalid response: data.tasks is missing', data); - this.tasks = []; - } else if (!Array.isArray(data.tasks)) { - console.error('Invalid response: data.tasks is not an array', data.tasks); - this.tasks = []; - } else { - // Verify each task has necessary properties - const validTasks = data.tasks.filter(task => { - if (!task || typeof task !== 'object') { - console.error('Invalid task (not an object):', task); - return false; - } - if (!task.uuid) { - console.error('Task missing uuid:', task); - return false; - } - if (!task.name) { - console.error('Task missing name:', task); - return false; - } - if (!task.type) { - console.error('Task missing type:', task); - return false; - } - return true; - }); - - if (validTasks.length !== data.tasks.length) { - console.warn(`Filtered out ${data.tasks.length - validTasks.length} invalid tasks`); - } - - this.tasks = validTasks; - - // Update UI using the shared function - this.updateTasksUI(); - } - } catch (error) { - console.error('Error fetching tasks:', error); - // Only show toast for errors on manual refresh, not during polling - if 
(!this.pollingInterval) { - showToast('Failed to fetch tasks: ' + error.message, 'error'); - } - // Reset tasks to empty array on error - this.tasks = []; - } finally { - this.isLoading = false; - } - }, - - // Change sort field/direction - changeSort(field) { - if (this.sortField === field) { - // Toggle direction if already sorting by this field - this.sortDirection = this.sortDirection === 'asc' ? 'desc' : 'asc'; - } else { - // Set new sort field and default to ascending - this.sortField = field; - this.sortDirection = 'asc'; - } - }, - - // Toggle expanded task row - toggleTaskExpand(taskId) { - if (this.expandedTaskId === taskId) { - this.expandedTaskId = null; - } else { - this.expandedTaskId = taskId; - } - }, - - // Show task detail view - showTaskDetail(taskId) { - const task = this.tasks.find(t => t.uuid === taskId); - if (!task) { - showToast('Task not found', 'error'); - return; - } - - // Create a copy of the task to avoid modifying the original - this.selectedTaskForDetail = JSON.parse(JSON.stringify(task)); - - // Ensure attachments is always an array - if (!this.selectedTaskForDetail.attachments) { - this.selectedTaskForDetail.attachments = []; - } - - this.viewMode = 'detail'; - }, - - // Close detail view and return to list - closeTaskDetail() { - this.selectedTaskForDetail = null; - this.viewMode = 'list'; - }, - - // Format date for display - formatDate(dateString) { - if (!dateString) return 'Never'; - return formatDateTime(dateString, 'full'); - }, - - // Format plan for display - formatPlan(task) { - if (!task || !task.plan) return 'No plan'; - - const todoCount = Array.isArray(task.plan.todo) ? task.plan.todo.length : 0; - const inProgress = task.plan.in_progress ? 'Yes' : 'No'; - const doneCount = Array.isArray(task.plan.done) ? 
task.plan.done.length : 0; - - let nextRun = ''; - if (Array.isArray(task.plan.todo) && task.plan.todo.length > 0) { - try { - const nextTime = new Date(task.plan.todo[0]); - - // Verify it's a valid date before formatting - if (!isNaN(nextTime.getTime())) { - nextRun = formatDateTime(nextTime, 'short'); - } else { - nextRun = 'Invalid date'; - console.warn(`Invalid date format in plan.todo[0]: ${task.plan.todo[0]}`); - } - } catch (error) { - console.error(`Error formatting next run time: ${error.message}`); - nextRun = 'Error'; - } - } else { - nextRun = 'None'; - } - - return `Next: ${nextRun}\nTodo: ${todoCount}\nIn Progress: ${inProgress}\nDone: ${doneCount}`; - }, - - // Format schedule for display - formatSchedule(task) { - if (!task.schedule) return 'None'; - - let schedule = ''; - if (typeof task.schedule === 'string') { - schedule = task.schedule; - } else if (typeof task.schedule === 'object') { - // Display only the cron parts, not the timezone - schedule = `${task.schedule.minute || '*'} ${task.schedule.hour || '*'} ${task.schedule.day || '*'} ${task.schedule.month || '*'} ${task.schedule.weekday || '*'}`; - } - - return schedule; - }, - - // Get CSS class for state badge - getStateBadgeClass(state) { - switch (state) { - case 'idle': return 'scheduler-status-idle'; - case 'running': return 'scheduler-status-running'; - case 'disabled': return 'scheduler-status-disabled'; - case 'error': return 'scheduler-status-error'; - default: return ''; - } - }, - - // Create a new task - startCreateTask() { - this.isCreating = true; - this.isEditing = false; - document.querySelector('[x-data="schedulerSettings"]')?.setAttribute('data-editing-state', 'creating'); - this.editingTask = { - name: '', - type: 'scheduled', // Default to scheduled - state: 'idle', // Initialize with idle state - schedule: { - minute: '*', - hour: '*', - day: '*', - month: '*', - weekday: '*', - timezone: getUserTimezone() - }, - token: this.generateRandomToken(), // Generate token even for scheduled tasks to prevent undefined errors - plan: { // Initialize plan for all task types to prevent undefined errors - todo: [], - in_progress: null, - done: [] - }, - system_prompt: '', - prompt: '', - attachments: [], // Always initialize as an empty array - }; - - // Set up Flatpickr after the component is visible - this.$nextTick(() => { - this.initFlatpickr('create'); - }); - }, - - // Edit an existing task - async startEditTask(taskId) { - const task = this.tasks.find(t => t.uuid === taskId); - if (!task) { - showToast('Task not found', 'error'); - return; - } - - this.isCreating = false; - this.isEditing = true; - document.querySelector('[x-data="schedulerSettings"]')?.setAttribute('data-editing-state', 'editing'); - - // Create a deep copy to avoid modifying the original - this.editingTask = JSON.parse(JSON.stringify(task)); - - // Debug log - console.log('Task data for editing:', task); - console.log('Attachments from task:', task.attachments); - - // Ensure state is set with a default if missing - if (!this.editingTask.state) this.editingTask.state = 'idle'; - - // Always initialize schedule to prevent UI errors - // All task types need this structure for the form to work properly - if (!this.editingTask.schedule || typeof this.editingTask.schedule === 'string') { - let scheduleObj = { - minute: '*', - hour: '*', - day: '*', - month: '*', - weekday: '*', - timezone: getUserTimezone() - }; - - // If it's a string, parse it - if (typeof this.editingTask.schedule === 'string') { - const parts = 
this.editingTask.schedule.split(' '); - if (parts.length >= 5) { - scheduleObj.minute = parts[0] || '*'; - scheduleObj.hour = parts[1] || '*'; - scheduleObj.day = parts[2] || '*'; - scheduleObj.month = parts[3] || '*'; - scheduleObj.weekday = parts[4] || '*'; - } - } - - this.editingTask.schedule = scheduleObj; - } else { - // Ensure timezone exists in the schedule - if (!this.editingTask.schedule.timezone) { - this.editingTask.schedule.timezone = getUserTimezone(); - } - } - - // Ensure attachments is always an array - if (!this.editingTask.attachments) { - this.editingTask.attachments = []; - } else if (typeof this.editingTask.attachments === 'string') { - // Handle case where attachments might be stored as a string - this.editingTask.attachments = this.editingTask.attachments - .split('\n') - .map(line => line.trim()) - .filter(line => line.length > 0); - } else if (!Array.isArray(this.editingTask.attachments)) { - // If not an array or string, set to empty array - this.editingTask.attachments = []; - } - - // Ensure appropriate properties are initialized based on task type - if (this.editingTask.type === 'scheduled') { - // Initialize token for scheduled tasks to prevent undefined errors if UI accesses it - if (!this.editingTask.token) { - this.editingTask.token = ''; - } - - // Initialize plan stub for scheduled tasks to prevent undefined errors - if (!this.editingTask.plan) { - this.editingTask.plan = { - todo: [], - in_progress: null, - done: [] - }; - } - } else if (this.editingTask.type === 'adhoc') { - // Initialize token if it doesn't exist - if (!this.editingTask.token) { - this.editingTask.token = this.generateRandomToken(); - console.log('Generated new token for adhoc task:', this.editingTask.token); - } - - console.log('Setting token for adhoc task:', this.editingTask.token); - - // Initialize plan stub for adhoc tasks to prevent undefined errors - if (!this.editingTask.plan) { - this.editingTask.plan = { - todo: [], - in_progress: null, - done: [] - }; - } - } else if (this.editingTask.type === 'planned') { - // Initialize plan if it doesn't exist - if (!this.editingTask.plan) { - this.editingTask.plan = { - todo: [], - in_progress: null, - done: [] - }; - } - - // Ensure todo is an array - if (!Array.isArray(this.editingTask.plan.todo)) { - this.editingTask.plan.todo = []; - } - - // Initialize token to prevent undefined errors - if (!this.editingTask.token) { - this.editingTask.token = ''; - } - } - - // Set up Flatpickr after the component is visible and task data is loaded - this.$nextTick(() => { - this.initFlatpickr('edit'); - }); - }, - - // Cancel editing - cancelEdit() { - // Clean up Flatpickr instances - const destroyFlatpickr = (inputId) => { - const input = document.getElementById(inputId); - if (input && input._flatpickr) { - console.log(`Destroying Flatpickr instance for ${inputId}`); - input._flatpickr.destroy(); - - // Also remove any wrapper elements that might have been created - const wrapper = input.closest('.scheduler-flatpickr-wrapper'); - if (wrapper && wrapper.parentNode) { - // Move the input back to its original position - wrapper.parentNode.insertBefore(input, wrapper); - // Remove the wrapper - wrapper.parentNode.removeChild(wrapper); - } - - // Remove any added classes - input.classList.remove('scheduler-flatpickr-input'); - } - }; - - if (this.isCreating) { - destroyFlatpickr('newPlannedTime-create'); - } else if (this.isEditing) { - destroyFlatpickr('newPlannedTime-edit'); - } - - // Reset to initial state but keep default values to prevent 
errors - this.editingTask = { - name: '', - type: 'scheduled', - state: 'idle', // Initialize with idle state - schedule: { - minute: '*', - hour: '*', - day: '*', - month: '*', - weekday: '*', - timezone: getUserTimezone() - }, - token: '', - plan: { // Initialize plan for planned tasks - todo: [], - in_progress: null, - done: [] - }, - system_prompt: '', - prompt: '', - attachments: [], // Always initialize as an empty array - }; - this.isCreating = false; - this.isEditing = false; - document.querySelector('[x-data="schedulerSettings"]')?.removeAttribute('data-editing-state'); - }, - - // Save task (create new or update existing) - async saveTask() { - // Validate task data - if (!this.editingTask.name.trim() || !this.editingTask.prompt.trim()) { - // showToast('Task name and prompt are required', 'error'); - alert('Task name and prompt are required'); - return; - } - - try { - let apiEndpoint, taskData; - - // Prepare task data - taskData = { - name: this.editingTask.name, - system_prompt: this.editingTask.system_prompt || '', - prompt: this.editingTask.prompt || '', - state: this.editingTask.state || 'idle', // Include state in task data - timezone: getUserTimezone() - }; - - // Process attachments - now always stored as array - taskData.attachments = Array.isArray(this.editingTask.attachments) - ? this.editingTask.attachments - .map(line => typeof line === 'string' ? line.trim() : line) - .filter(line => line && line.trim().length > 0) - : []; - - // Handle task type specific data - if (this.editingTask.type === 'scheduled') { - // Ensure schedule is properly formatted as an object - if (typeof this.editingTask.schedule === 'string') { - // Parse string schedule into object - const parts = this.editingTask.schedule.split(' '); - taskData.schedule = { - minute: parts[0] || '*', - hour: parts[1] || '*', - day: parts[2] || '*', - month: parts[3] || '*', - weekday: parts[4] || '*', - timezone: getUserTimezone() // Add timezone to schedule object - }; - } else { - // Use object schedule directly but ensure timezone is included - taskData.schedule = { - ...this.editingTask.schedule, - timezone: this.editingTask.schedule.timezone || getUserTimezone() - }; - } - // Don't send token or plan for scheduled tasks - delete taskData.token; - delete taskData.plan; - } else if (this.editingTask.type === 'adhoc') { - // Ad-hoc task with token - // Ensure token is a non-empty string, generate a new one if needed - if (!this.editingTask.token) { - this.editingTask.token = this.generateRandomToken(); - console.log('Generated new token for adhoc task:', this.editingTask.token); - } - - console.log('Setting token in taskData:', this.editingTask.token); - taskData.token = this.editingTask.token; - - // Don't send schedule or plan for adhoc tasks - delete taskData.schedule; - delete taskData.plan; - } else if (this.editingTask.type === 'planned') { - // Planned task with plan - // Make sure plan exists and has required properties - if (!this.editingTask.plan) { - this.editingTask.plan = { - todo: [], - in_progress: null, - done: [] - }; - } - - // Ensure todo and done are arrays - if (!Array.isArray(this.editingTask.plan.todo)) { - this.editingTask.plan.todo = []; - } - - if (!Array.isArray(this.editingTask.plan.done)) { - this.editingTask.plan.done = []; - } - - // Validate each date in the todo list to ensure it's a valid ISO string - const validatedTodo = []; - for (const dateStr of this.editingTask.plan.todo) { - try { - const date = new Date(dateStr); - if (!isNaN(date.getTime())) { - 
validatedTodo.push(date.toISOString()); - } else { - console.warn(`Skipping invalid date in todo list: ${dateStr}`); - } - } catch (error) { - console.warn(`Error processing date: ${error.message}`); - } - } - - // Replace with validated list - this.editingTask.plan.todo = validatedTodo; - - // Sort the todo items by date (earliest first) - this.editingTask.plan.todo.sort(); - - // Set the plan in taskData - taskData.plan = { - todo: this.editingTask.plan.todo, - in_progress: this.editingTask.plan.in_progress, - done: this.editingTask.plan.done || [] - }; - - // Log the plan data for debugging - console.log('Planned task plan data:', JSON.stringify(taskData.plan, null, 2)); - - // Don't send schedule or token for planned tasks - delete taskData.schedule; - delete taskData.token; - } - - // Determine if creating or updating - if (this.isCreating) { - apiEndpoint = '/scheduler_task_create'; - } else { - apiEndpoint = '/scheduler_task_update'; - taskData.task_id = this.editingTask.uuid; - } - - // Debug: Log the final task data being sent - console.log('Final task data being sent to API:', JSON.stringify(taskData, null, 2)); - - // Make API request - const response = await fetchApi(apiEndpoint, { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify(taskData) - }); - - if (!response.ok) { - const errorData = await response.json(); - throw new Error(errorData.error || 'Failed to save task'); - } - - // Parse response data to get the created/updated task - const responseData = await response.json(); - - // Show success message - showToast(this.isCreating ? 'Task created successfully' : 'Task updated successfully', 'success'); - - // Immediately update the UI if the response includes the task - if (responseData && responseData.task) { - console.log('Task received in response:', responseData.task); - - // Update the tasks array - if (this.isCreating) { - // For new tasks, add to the array - this.tasks = [...this.tasks, responseData.task]; - } else { - // For updated tasks, replace the existing one - this.tasks = this.tasks.map(t => - t.uuid === responseData.task.uuid ? 
responseData.task : t - ); - } - - // Update UI using the shared function - this.updateTasksUI(); - } else { - // Fallback to fetching tasks if no task in response - await this.fetchTasks(); - } - - // Clean up Flatpickr instances - const destroyFlatpickr = (inputId) => { - const input = document.getElementById(inputId); - if (input && input._flatpickr) { - input._flatpickr.destroy(); - } - }; - - if (this.isCreating) { - destroyFlatpickr('newPlannedTime-create'); - } else if (this.isEditing) { - destroyFlatpickr('newPlannedTime-edit'); - } - - // Reset task data and form state - this.editingTask = { - name: '', - type: 'scheduled', - state: 'idle', - schedule: { - minute: '*', - hour: '*', - day: '*', - month: '*', - weekday: '*', - timezone: getUserTimezone() - }, - token: '', - plan: { - todo: [], - in_progress: null, - done: [] - }, - system_prompt: '', - prompt: '', - attachments: [] - }; - this.isCreating = false; - this.isEditing = false; - document.querySelector('[x-data="schedulerSettings"]')?.removeAttribute('data-editing-state'); - } catch (error) { - console.error('Error saving task:', error); - showToast('Failed to save task: ' + error.message, 'error'); - } - }, - - // Run a task - async runTask(taskId) { - try { - const response = await fetchApi('/scheduler_task_run', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - task_id: taskId, - timezone: getUserTimezone() - }) - }); - - if (!response.ok) { - const errorData = await response.json(); - throw new Error(errorData.error || 'Failed to run task'); - } - - showToast('Task started successfully', 'success'); - - // Refresh task list - this.fetchTasks(); - } catch (error) { - console.error('Error running task:', error); - showToast('Failed to run task: ' + error.message, 'error'); - } - }, - - // Reset a task's state - async resetTaskState(taskId) { - try { - const task = this.tasks.find(t => t.uuid === taskId); - if (!task) { - showToast('Task not found', 'error'); - return; - } - - // Check if task is already in idle state - if (task.state === 'idle') { - showToast('Task is already in idle state', 'info'); - return; - } - - this.showLoadingState = true; - - // Call API to update the task state - const response = await fetchApi('/scheduler_task_update', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - task_id: taskId, - state: 'idle', // Always reset to idle state - timezone: getUserTimezone() - }) - }); - - if (!response.ok) { - const errorData = await response.json(); - throw new Error(errorData.error || 'Failed to reset task state'); - } - - showToast('Task state reset to idle', 'success'); - - // Refresh task list - await this.fetchTasks(); - this.showLoadingState = false; - } catch (error) { - console.error('Error resetting task state:', error); - showToast('Failed to reset task state: ' + error.message, 'error'); - this.showLoadingState = false; - } - }, - - // Delete a task - async deleteTask(taskId) { - // Confirm deletion - if (!confirm('Are you sure you want to delete this task? 
This action cannot be undone.')) { - return; - } - - try { - - // if we delete selected context, switch to another first - switchFromContext(taskId); - - const response = await fetchApi('/scheduler_task_delete', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - task_id: taskId, - timezone: getUserTimezone() - }) - }); - - if (!response.ok) { - const errorData = await response.json(); - throw new Error(errorData.error || 'Failed to delete task'); - } - - showToast('Task deleted successfully', 'success'); - - // If we were viewing the detail of the deleted task, close the detail view - if (this.selectedTaskForDetail && this.selectedTaskForDetail.uuid === taskId) { - this.closeTaskDetail(); - } - - // Immediately update UI without waiting for polling - this.tasks = this.tasks.filter(t => t.uuid !== taskId); - - // Update UI using the shared function - this.updateTasksUI(); - } catch (error) { - console.error('Error deleting task:', error); - showToast('Failed to delete task: ' + error.message, 'error'); - } - }, - - // Initialize datetime input with default value (30 minutes from now) - initDateTimeInput(event) { - if (!event.target.value) { - const now = new Date(); - now.setMinutes(now.getMinutes() + 30); - - // Format as YYYY-MM-DDThh:mm - const year = now.getFullYear(); - const month = String(now.getMonth() + 1).padStart(2, '0'); - const day = String(now.getDate()).padStart(2, '0'); - const hours = String(now.getHours()).padStart(2, '0'); - const minutes = String(now.getMinutes()).padStart(2, '0'); - - event.target.value = `${year}-${month}-${day}T${hours}:${minutes}`; - - // If using Flatpickr, update it as well - if (event.target._flatpickr) { - event.target._flatpickr.setDate(event.target.value); - } - } - }, - - // Generate a random token for ad-hoc tasks - generateRandomToken() { - const characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; - let token = ''; - for (let i = 0; i < 16; i++) { - token += characters.charAt(Math.floor(Math.random() * characters.length)); - } - return token; - }, - - // Getter for filtered tasks - get filteredTasks() { - // Make sure we have tasks to filter - if (!Array.isArray(this.tasks)) { - console.warn('Tasks is not an array:', this.tasks); - return []; - } - - let filtered = [...this.tasks]; - - // Apply type filter with case-insensitive comparison - if (this.filterType && this.filterType !== 'all') { - filtered = filtered.filter(task => { - if (!task.type) return false; - return String(task.type).toLowerCase() === this.filterType.toLowerCase(); - }); - } - - // Apply state filter with case-insensitive comparison - if (this.filterState && this.filterState !== 'all') { - filtered = filtered.filter(task => { - if (!task.state) return false; - return String(task.state).toLowerCase() === this.filterState.toLowerCase(); - }); - } - - // Sort the filtered tasks - return this.sortTasks(filtered); - }, - - // Sort the tasks based on sort field and direction - sortTasks(tasks) { - if (!Array.isArray(tasks) || tasks.length === 0) { - return tasks; - } - - return [...tasks].sort((a, b) => { - if (!this.sortField) return 0; - - const fieldA = a[this.sortField]; - const fieldB = b[this.sortField]; - - // Handle cases where fields might be undefined - if (fieldA === undefined && fieldB === undefined) return 0; - if (fieldA === undefined) return 1; - if (fieldB === undefined) return -1; - - // For dates, convert to timestamps - if (this.sortField === 'createdAt' || this.sortField === 
'updatedAt') { - const dateA = new Date(fieldA).getTime(); - const dateB = new Date(fieldB).getTime(); - return this.sortDirection === 'asc' ? dateA - dateB : dateB - dateA; - } - - // For string comparisons - if (typeof fieldA === 'string' && typeof fieldB === 'string') { - return this.sortDirection === 'asc' - ? fieldA.localeCompare(fieldB) - : fieldB.localeCompare(fieldA); - } - - // For numerical comparisons - return this.sortDirection === 'asc' ? fieldA - fieldB : fieldB - fieldA; - }); - }, - - // Computed property for attachments text representation - get attachmentsText() { - // Ensure we always have an array to work with - const attachments = Array.isArray(this.editingTask.attachments) - ? this.editingTask.attachments - : []; - - // Join array items with newlines - return attachments.join('\n'); - }, - - // Setter for attachments text - preserves empty lines during editing - set attachmentsText(value) { - if (typeof value === 'string') { - // Just split by newlines without filtering to preserve editing experience - this.editingTask.attachments = value.split('\n'); - } else { - // Fallback to empty array if not a string - this.editingTask.attachments = []; - } - }, - - // Debug method to test filtering logic - testFiltering() { - console.group('SchedulerSettings Debug: Filter Test'); - console.log('Current Filter Settings:'); - console.log('- Filter Type:', this.filterType); - console.log('- Filter State:', this.filterState); - console.log('- Sort Field:', this.sortField); - console.log('- Sort Direction:', this.sortDirection); - - // Check if tasks is an array - if (!Array.isArray(this.tasks)) { - console.error('ERROR: this.tasks is not an array!', this.tasks); - console.groupEnd(); - return; - } - - console.log(`Raw Tasks (${this.tasks.length}):`, this.tasks); - - // Test filtering by type - console.group('Filter by Type Test'); - ['all', 'adhoc', 'scheduled', 'recurring'].forEach(type => { - const filtered = this.tasks.filter(task => - type === 'all' || - (task.type && String(task.type).toLowerCase() === type) - ); - console.log(`Type "${type}": ${filtered.length} tasks`, filtered); - }); - console.groupEnd(); - - // Test filtering by state - console.group('Filter by State Test'); - ['all', 'idle', 'running', 'completed', 'failed'].forEach(state => { - const filtered = this.tasks.filter(task => - state === 'all' || - (task.state && String(task.state).toLowerCase() === state) - ); - console.log(`State "${state}": ${filtered.length} tasks`, filtered); - }); - console.groupEnd(); - - // Show current filtered tasks - console.log('Current Filtered Tasks:', this.filteredTasks); - - console.groupEnd(); - }, - - // New comprehensive debug method - debugTasks() { - console.group('SchedulerSettings Comprehensive Debug'); - - // Component state - console.log('Component State:'); - console.log({ - filterType: this.filterType, - filterState: this.filterState, - sortField: this.sortField, - sortDirection: this.sortDirection, - isLoading: this.isLoading, - isEditing: this.isEditing, - isCreating: this.isCreating, - viewMode: this.viewMode - }); - - // Tasks validation - if (!this.tasks) { - console.error('ERROR: this.tasks is undefined or null!'); - console.groupEnd(); - return; - } - - if (!Array.isArray(this.tasks)) { - console.error('ERROR: this.tasks is not an array!', typeof this.tasks, this.tasks); - console.groupEnd(); - return; - } - - // Raw tasks - console.group('Raw Tasks'); - console.log(`Count: ${this.tasks.length}`); - if (this.tasks.length > 0) { - console.table(this.tasks.map(t 
=> ({ - uuid: t.uuid, - name: t.name, - type: t.type, - state: t.state - }))); - - // Inspect first task in detail - console.log('First Task Structure:', JSON.stringify(this.tasks[0], null, 2)); - } else { - console.log('No tasks available'); - } - console.groupEnd(); - - // Filtered tasks - console.group('Filtered Tasks'); - const filteredTasks = this.filteredTasks; - console.log(`Count: ${filteredTasks.length}`); - if (filteredTasks.length > 0) { - console.table(filteredTasks.map(t => ({ - uuid: t.uuid, - name: t.name, - type: t.type, - state: t.state - }))); - } else { - console.log('No filtered tasks'); - } - console.groupEnd(); - - // Check for potential issues - console.group('Potential Issues'); - - // Check for case mismatches - if (this.tasks.length > 0 && filteredTasks.length === 0) { - console.warn('Filter seems to exclude all tasks. Checking why:'); - - // Check type values - const uniqueTypes = [...new Set(this.tasks.map(t => t.type))]; - console.log('Unique task types in data:', uniqueTypes); - - // Check state values - const uniqueStates = [...new Set(this.tasks.map(t => t.state))]; - console.log('Unique task states in data:', uniqueStates); - - // Check for exact mismatches - if (this.filterType !== 'all') { - const typeMatch = this.tasks.some(t => - t.type && String(t.type).toLowerCase() === this.filterType.toLowerCase() - ); - console.log(`Type "${this.filterType}" matches found:`, typeMatch); - } - - if (this.filterState !== 'all') { - const stateMatch = this.tasks.some(t => - t.state && String(t.state).toLowerCase() === this.filterState.toLowerCase() - ); - console.log(`State "${this.filterState}" matches found:`, stateMatch); - } - } - - // Check for undefined or null values - const hasUndefinedType = this.tasks.some(t => t.type === undefined || t.type === null); - const hasUndefinedState = this.tasks.some(t => t.state === undefined || t.state === null); - - if (hasUndefinedType) { - console.warn('Some tasks have undefined or null type values!'); - } - - if (hasUndefinedState) { - console.warn('Some tasks have undefined or null state values!'); - } - - console.groupEnd(); - - console.groupEnd(); - }, - - // Initialize Flatpickr datetime pickers for both create and edit forms - /** - * Initialize Flatpickr date/time pickers for scheduler forms - * - * @param {string} mode - Which pickers to initialize: 'all', 'create', or 'edit' - * @returns {void} - */ - initFlatpickr(mode = 'all') { - const initPicker = (inputId, refName, wrapperClass, options = {}) => { - // Try to get input using Alpine.js x-ref first (more reliable) - let input = this.$refs[refName]; - - // Fall back to getElementById if x-ref is not available - if (!input) { - input = document.getElementById(inputId); - console.log(`Using getElementById fallback for ${inputId}`); - } - - if (!input) { - console.warn(`Input element ${inputId} not found by ID or ref`); - return null; - } - - // Create a wrapper around the input - const wrapper = document.createElement('div'); - wrapper.className = wrapperClass || 'scheduler-flatpickr-wrapper'; - wrapper.style.overflow = 'visible'; // Ensure dropdown can escape container - - // Replace the input with our wrapped version - input.parentNode.insertBefore(wrapper, input); - wrapper.appendChild(input); - input.classList.add('scheduler-flatpickr-input'); - - // Default options - const defaultOptions = { - dateFormat: "Y-m-d H:i", - enableTime: true, - time_24hr: true, - static: false, // Not static so it will float - appendTo: document.body, // Append to body to avoid 
overflow issues - theme: "scheduler-theme", - allowInput: true, - positionElement: wrapper, // Position relative to wrapper - onOpen: function(selectedDates, dateStr, instance) { - // Ensure calendar is properly positioned and visible - instance.calendarContainer.style.zIndex = '9999'; - instance.calendarContainer.style.position = 'absolute'; - instance.calendarContainer.style.visibility = 'visible'; - instance.calendarContainer.style.opacity = '1'; - - // Add class to calendar container for our custom styling - instance.calendarContainer.classList.add('scheduler-theme'); - }, - // Set default date to 30 minutes from now if no date selected - onReady: function(selectedDates, dateStr, instance) { - if (!dateStr) { - const now = new Date(); - now.setMinutes(now.getMinutes() + 30); - instance.setDate(now, true); - } - } - }; - - // Merge options - const mergedOptions = {...defaultOptions, ...options}; - - // Initialize flatpickr - const fp = flatpickr(input, mergedOptions); - - // Add a clear button - const clearButton = document.createElement('button'); - clearButton.className = 'scheduler-flatpickr-clear'; - clearButton.innerHTML = '×'; - clearButton.type = 'button'; - clearButton.addEventListener('click', (e) => { - e.preventDefault(); - e.stopPropagation(); - if (fp) { - fp.clear(); - } - }); - wrapper.appendChild(clearButton); - - return fp; - }; - - // Clear any existing Flatpickr instances to prevent duplication - if (mode === 'all' || mode === 'create') { - const createInput = document.getElementById('newPlannedTime-create'); - if (createInput && createInput._flatpickr) { - createInput._flatpickr.destroy(); - } - } - - if (mode === 'all' || mode === 'edit') { - const editInput = document.getElementById('newPlannedTime-edit'); - if (editInput && editInput._flatpickr) { - editInput._flatpickr.destroy(); - } - } - - // Initialize new instances - if (mode === 'all' || mode === 'create') { - initPicker('newPlannedTime-create', 'plannedTimeCreate', 'scheduler-flatpickr-wrapper', { - minuteIncrement: 5, - defaultHour: new Date().getHours(), - defaultMinute: Math.ceil(new Date().getMinutes() / 5) * 5 - }); - } - - if (mode === 'all' || mode === 'edit') { - initPicker('newPlannedTime-edit', 'plannedTimeEdit', 'scheduler-flatpickr-wrapper', { - minuteIncrement: 5, - defaultHour: new Date().getHours(), - defaultMinute: Math.ceil(new Date().getMinutes() / 5) * 5 - }); - } - }, - - // Update tasks UI - updateTasksUI() { - // First update filteredTasks if that method exists - if (typeof this.updateFilteredTasks === 'function') { - this.updateFilteredTasks(); - } - - // Wait for UI to update - this.$nextTick(() => { - // Get empty state and task list elements - const emptyElement = document.querySelector('.scheduler-empty'); - const tableElement = document.querySelector('.scheduler-task-list'); - - // Calculate visibility state based on filtered tasks - const hasFilteredTasks = Array.isArray(this.filteredTasks) && this.filteredTasks.length > 0; - - // Update visibility directly - if (emptyElement) { - emptyElement.style.display = !hasFilteredTasks ? '' : 'none'; - } - - if (tableElement) { - tableElement.style.display = hasFilteredTasks ?
'' : 'none'; - } - }); - } - }; -}; - - -// Only define the component if it doesn't already exist or extend the existing one -if (!window.schedulerSettings) { - console.log('Defining schedulerSettings component from scratch'); - window.schedulerSettings = fullComponentImplementation; -} else { - console.log('Extending existing schedulerSettings component'); - // Store the original function - const originalSchedulerSettings = window.schedulerSettings; - - // Replace with enhanced version that merges the pre-initialized stub with the full implementation - window.schedulerSettings = function() { - // Get the base pre-initialized component - const baseComponent = originalSchedulerSettings(); - - // Create a backup of the original init function - const originalInit = baseComponent.init || function() {}; - - // Create our enhanced init function that adds the missing functionality - baseComponent.init = function() { - // Call the original init if it exists - originalInit.call(this); - - console.log('Enhanced init running: adding missing methods to component'); - - // Get the full implementation - const fullImpl = fullComponentImplementation(); - - // Add essential methods directly - const essentialMethods = [ - 'fetchTasks', 'startPolling', 'stopPolling', - 'startCreateTask', 'startEditTask', 'cancelEdit', - 'saveTask', 'runTask', 'resetTaskState', 'deleteTask', - 'toggleTaskExpand', 'showTaskDetail', 'closeTaskDetail', - 'changeSort', 'formatDate', 'formatPlan', 'formatSchedule', - 'getStateBadgeClass', 'generateRandomToken', 'testFiltering', - 'debugTasks', 'sortTasks', 'initFlatpickr', 'initDateTimeInput', - 'updateTasksUI' - ]; - - essentialMethods.forEach(method => { - if (typeof this[method] !== 'function' && typeof fullImpl[method] === 'function') { - console.log(`Adding missing method: ${method}`); - this[method] = fullImpl[method]; - } - }); - - // hack to expose deleteTask - window.deleteTaskGlobal = this.deleteTask.bind(this); - - // Make sure we have a filteredTasks array initialized - this.filteredTasks = []; - - // Initialize essential properties if missing - if (!Array.isArray(this.tasks)) { - this.tasks = []; - } - - // Make sure attachmentsText getter/setter are defined - if (!Object.getOwnPropertyDescriptor(this, 'attachmentsText')?.get) { - Object.defineProperty(this, 'attachmentsText', { - get: function() { - // Ensure we always have an array to work with - const attachments = Array.isArray(this.editingTask?.attachments) - ? 
this.editingTask.attachments - : []; - - // Join array items with newlines - return attachments.join('\n'); - }, - set: function(value) { - if (!this.editingTask) { - this.editingTask = { attachments: [] }; - } - - if (typeof value === 'string') { - // Just split by newlines without filtering to preserve editing experience - this.editingTask.attachments = value.split('\n'); - } else { - // Fallback to empty array if not a string - this.editingTask.attachments = []; - } - } - }); - } - - // Add methods for updating filteredTasks directly - if (typeof this.updateFilteredTasks !== 'function') { - this.updateFilteredTasks = function() { - // Make sure we have tasks to filter - if (!Array.isArray(this.tasks)) { - this.filteredTasks = []; - return; - } - - let filtered = [...this.tasks]; - - // Apply type filter with case-insensitive comparison - if (this.filterType && this.filterType !== 'all') { - filtered = filtered.filter(task => { - if (!task.type) return false; - return String(task.type).toLowerCase() === this.filterType.toLowerCase(); - }); - } - - // Apply state filter with case-insensitive comparison - if (this.filterState && this.filterState !== 'all') { - filtered = filtered.filter(task => { - if (!task.state) return false; - return String(task.state).toLowerCase() === this.filterState.toLowerCase(); - }); - } - - // Sort the filtered tasks - if (typeof this.sortTasks === 'function') { - filtered = this.sortTasks(filtered); - } - - // Directly update the filteredTasks property - this.filteredTasks = filtered; - }; - } - - // Set up watchers to update filtered tasks when dependencies change - this.$nextTick(() => { - // Update filtered tasks when raw tasks change - this.$watch('tasks', () => { - this.updateFilteredTasks(); - }); - - // Update filtered tasks when filter type changes - this.$watch('filterType', () => { - this.updateFilteredTasks(); - }); - - // Update filtered tasks when filter state changes - this.$watch('filterState', () => { - this.updateFilteredTasks(); - }); - - // Update filtered tasks when sort field or direction changes - this.$watch('sortField', () => { - this.updateFilteredTasks(); - }); - - this.$watch('sortDirection', () => { - this.updateFilteredTasks(); - }); - - // Initial update - this.updateFilteredTasks(); - - // Set up watcher for task type changes to initialize Flatpickr for planned tasks - this.$watch('editingTask.type', (newType) => { - if (newType === 'planned') { - this.$nextTick(() => { - // Reinitialize Flatpickr when switching to planned task type - if (this.isCreating) { - this.initFlatpickr('create'); - } else if (this.isEditing) { - this.initFlatpickr('edit'); - } - }); - } - }); - - // Initialize Flatpickr - this.$nextTick(() => { - if (typeof this.initFlatpickr === 'function') { - this.initFlatpickr(); - } else { - console.error('initFlatpickr is not available'); - } - }); - }); - - // Try fetching tasks after a short delay - setTimeout(() => { - if (typeof this.fetchTasks === 'function') { - this.fetchTasks(); - } else { - console.error('fetchTasks still not available after enhancement'); - } - }, 100); - - console.log('Enhanced init complete'); - }; - - return baseComponent; - }; -} - -// Force Alpine.js to register the component immediately -if (window.Alpine) { - // Alpine is already loaded, register now - console.log('Alpine already loaded, registering schedulerSettings component now'); - window.Alpine.data('schedulerSettings', window.schedulerSettings); -} else { - // Wait for Alpine to load - document.addEventListener('alpine:init', 
() => { - console.log('Alpine:init - immediately registering schedulerSettings component'); - Alpine.data('schedulerSettings', window.schedulerSettings); - }); -} - -// Add a document ready event handler to ensure the scheduler tab can be clicked on first load -document.addEventListener('DOMContentLoaded', function() { - console.log('DOMContentLoaded - setting up scheduler tab click handler'); - // Setup scheduler tab click handling - const setupSchedulerTab = () => { - const settingsModal = document.getElementById('settingsModal'); - if (!settingsModal) { - setTimeout(setupSchedulerTab, 100); - return; - } - - // Create a global event listener for clicks on the scheduler tab - document.addEventListener('click', function(e) { - // Find if the click was on the scheduler tab or its children - const schedulerTab = e.target.closest('.settings-tab[title="Task Scheduler"]'); - if (!schedulerTab) return; - - e.preventDefault(); - e.stopPropagation(); - - // Get the settings modal data - try { - const modalData = Alpine.$data(settingsModal); - if (modalData.activeTab !== 'scheduler') { - // Directly call the modal's switchTab method - modalData.switchTab('scheduler'); - } - - // Force start polling and fetch tasks immediately when tab is selected - setTimeout(() => { - // Get the scheduler component data - const schedulerElement = document.querySelector('[x-data="schedulerSettings"]'); - if (schedulerElement) { - const schedulerData = Alpine.$data(schedulerElement); - - // Force fetch tasks and start polling - if (typeof schedulerData.fetchTasks === 'function') { - schedulerData.fetchTasks(); - } else { - console.error('fetchTasks is not a function on scheduler component'); - } - - if (typeof schedulerData.startPolling === 'function') { - schedulerData.startPolling(); - } else { - console.error('startPolling is not a function on scheduler component'); - } - } else { - console.error('Could not find scheduler component element'); - } - }, 100); - } catch (err) { - console.error('Error handling scheduler tab click:', err); - } - }, true); // Use capture phase to intercept before Alpine.js handlers - }; - - // Initialize the tab handling - setupSchedulerTab(); -}); diff --git a/webui/js/settings.js b/webui/js/settings.js deleted file mode 100644 index 059d645410..0000000000 --- a/webui/js/settings.js +++ /dev/null @@ -1,591 +0,0 @@ -const settingsModalProxy = { - isOpen: false, - settings: {}, - resolvePromise: null, - activeTab: 'agent', // Default tab - provider: 'cloudflared', - - // Computed property for filtered sections - get filteredSections() { - if (!this.settings || !this.settings.sections) return []; - const filteredSections = this.settings.sections.filter(section => section.tab === this.activeTab); - - // If no sections match the current tab (or all tabs are missing), show all sections - if (filteredSections.length === 0) { - return this.settings.sections; - } - - return filteredSections; - }, - - // Switch tab method - switchTab(tabName) { - // Update our component state - this.activeTab = tabName; - - // Update the store safely - const store = Alpine.store('root'); - if (store) { - store.activeTab = tabName; - } - - localStorage.setItem('settingsActiveTab', tabName); - - // Auto-scroll active tab into view after a short delay to ensure DOM updates - setTimeout(() => { - const activeTab = document.querySelector('.settings-tab.active'); - if (activeTab) { - activeTab.scrollIntoView({ behavior: 'smooth', block: 'nearest', inline: 'center' }); - } - - // When switching to the scheduler tab, 
initialize Flatpickr components - if (tabName === 'scheduler') { - console.log('Switching to scheduler tab, initializing Flatpickr'); - const schedulerElement = document.querySelector('[x-data="schedulerSettings"]'); - if (schedulerElement) { - const schedulerData = Alpine.$data(schedulerElement); - if (schedulerData) { - // Start polling - if (typeof schedulerData.startPolling === 'function') { - schedulerData.startPolling(); - } - - // Initialize Flatpickr if editing or creating - if (typeof schedulerData.initFlatpickr === 'function') { - // Check if we're creating or editing and initialize accordingly - if (schedulerData.isCreating) { - schedulerData.initFlatpickr('create'); - } else if (schedulerData.isEditing) { - schedulerData.initFlatpickr('edit'); - } - } - - // Force an immediate fetch - if (typeof schedulerData.fetchTasks === 'function') { - schedulerData.fetchTasks(); - } - } - } - } - }, 10); - }, - - async openModal() { - console.log('Settings modal opening'); - const modalEl = document.getElementById('settingsModal'); - const modalAD = Alpine.$data(modalEl); - - // First, ensure the store is updated properly - const store = Alpine.store('root'); - if (store) { - // Set isOpen first to ensure proper state - store.isOpen = true; - } - - //get settings from backend - try { - const set = await sendJsonData("/settings_get", null); - - // First load the settings data without setting the active tab - const settings = { - "title": "Settings", - "buttons": [ - { - "id": "save", - "title": "Save", - "classes": "btn btn-ok" - }, - { - "id": "cancel", - "title": "Cancel", - "type": "secondary", - "classes": "btn btn-cancel" - } - ], - "sections": set.settings.sections - } - - // Update modal data - modalAD.isOpen = true; - modalAD.settings = settings; - - // Now set the active tab after the modal is open - // This ensures Alpine reactivity works as expected - setTimeout(() => { - // Get stored tab or default to 'agent' - const savedTab = localStorage.getItem('settingsActiveTab') || 'agent'; - console.log(`Setting initial tab to: ${savedTab}`); - - // Directly set the active tab - modalAD.activeTab = savedTab; - - // Also update the store - if (store) { - store.activeTab = savedTab; - } - - localStorage.setItem('settingsActiveTab', savedTab); - - // Add a small delay *after* setting the tab to ensure scrolling works - setTimeout(() => { - const activeTabElement = document.querySelector('.settings-tab.active'); - if (activeTabElement) { - activeTabElement.scrollIntoView({ behavior: 'smooth', block: 'nearest', inline: 'center' }); - } - // Debug log - const schedulerTab = document.querySelector('.settings-tab[title="Task Scheduler"]'); - console.log(`Current active tab after direct set: ${modalAD.activeTab}`); - console.log('Scheduler tab active after direct initialization?', - schedulerTab && schedulerTab.classList.contains('active')); - - // Explicitly start polling if we're on the scheduler tab - if (modalAD.activeTab === 'scheduler') { - console.log('Settings opened directly to scheduler tab, initializing polling'); - const schedulerElement = document.querySelector('[x-data="schedulerSettings"]'); - if (schedulerElement) { - const schedulerData = Alpine.$data(schedulerElement); - if (schedulerData && typeof schedulerData.startPolling === 'function') { - schedulerData.startPolling(); - // Also force an immediate fetch - if (typeof schedulerData.fetchTasks === 'function') { - schedulerData.fetchTasks(); - } - } - } - } - }, 10); // Small delay just for scrolling - - }, 5); // Keep a 
minimal delay for modal opening reactivity - - // Add a watcher to disable the Save button when a task is being created or edited - const schedulerComponent = document.querySelector('[x-data="schedulerSettings"]'); - if (schedulerComponent) { - // Watch for changes to the scheduler's editing state - const checkSchedulerEditingState = () => { - const schedulerData = Alpine.$data(schedulerComponent); - if (schedulerData) { - // If we're on the scheduler tab and creating/editing a task, disable the Save button - const saveButton = document.querySelector('.modal-footer button.btn-ok'); - if (saveButton && modalAD.activeTab === 'scheduler' && - (schedulerData.isCreating || schedulerData.isEditing)) { - saveButton.disabled = true; - saveButton.classList.add('btn-disabled'); - } else if (saveButton) { - saveButton.disabled = false; - saveButton.classList.remove('btn-disabled'); - } - } - }; - - // Add a mutation observer to detect changes in the scheduler component's state - const observer = new MutationObserver(checkSchedulerEditingState); - observer.observe(schedulerComponent, { attributes: true, subtree: true, childList: true }); - - // Also watch for tab changes to update button state - modalAD.$watch('activeTab', checkSchedulerEditingState); - - // Initial check - setTimeout(checkSchedulerEditingState, 100); - } - - return new Promise(resolve => { - this.resolvePromise = resolve; - }); - - } catch (e) { - window.toastFetchError("Error getting settings", e) - } - }, - - async handleButton(buttonId) { - if (buttonId === 'save') { - - const modalEl = document.getElementById('settingsModal'); - const modalAD = Alpine.$data(modalEl); - try { - resp = await window.sendJsonData("/settings_set", modalAD.settings); - } catch (e) { - window.toastFetchError("Error saving settings", e) - return - } - document.dispatchEvent(new CustomEvent('settings-updated', { detail: resp.settings })); - this.resolvePromise({ - status: 'saved', - data: resp.settings - }); - } else if (buttonId === 'cancel') { - this.handleCancel(); - } - - // Stop scheduler polling if it's running - this.stopSchedulerPolling(); - - // First update our component state - this.isOpen = false; - - // Then safely update the store - const store = Alpine.store('root'); - if (store) { - // Use a slight delay to avoid reactivity issues - setTimeout(() => { - store.isOpen = false; - }, 10); - } - }, - - async handleCancel() { - this.resolvePromise({ - status: 'cancelled', - data: null - }); - - // Stop scheduler polling if it's running - this.stopSchedulerPolling(); - - // First update our component state - this.isOpen = false; - - // Then safely update the store - const store = Alpine.store('root'); - if (store) { - // Use a slight delay to avoid reactivity issues - setTimeout(() => { - store.isOpen = false; - }, 10); - } - }, - - // Add a helper method to stop scheduler polling - stopSchedulerPolling() { - // Find the scheduler component and stop polling if it exists - const schedulerElement = document.querySelector('[x-data="schedulerSettings"]'); - if (schedulerElement) { - const schedulerData = Alpine.$data(schedulerElement); - if (schedulerData && typeof schedulerData.stopPolling === 'function') { - console.log('Stopping scheduler polling on modal close'); - schedulerData.stopPolling(); - } - } - }, - - async handleFieldButton(field) { - console.log(`Button clicked: ${field.id}`); - - if (field.id === "mcp_servers_config") { - openModal("settings/mcp/client/mcp-servers.html"); - } else if (field.id === "backup_create") { - 
openModal("settings/backup/backup.html"); - } else if (field.id === "backup_restore") { - openModal("settings/backup/restore.html"); - } else if (field.id === "show_a2a_connection") { - openModal("settings/external/a2a-connection.html"); - } else if (field.id === "external_api_examples") { - openModal("settings/external/api-examples.html"); - } else if (field.id === "memory_dashboard") { - openModal("settings/memory/memory-dashboard.html"); - } - } -}; - - -// function initSettingsModal() { - -// window.openSettings = function () { -// proxy.openModal().then(result => { -// console.log(result); // This will log the result when the modal is closed -// }); -// } - -// return proxy -// } - - -// document.addEventListener('alpine:init', () => { -// Alpine.store('settingsModal', initSettingsModal()); -// }); - -document.addEventListener('alpine:init', function () { - // Initialize the root store first to ensure it exists before components try to access it - Alpine.store('root', { - activeTab: localStorage.getItem('settingsActiveTab') || 'agent', - isOpen: false, - - toggleSettings() { - this.isOpen = !this.isOpen; - } - }); - - // Then initialize other Alpine components - Alpine.data('settingsModal', function () { - return { - settingsData: {}, - filteredSections: [], - activeTab: 'agent', - isLoading: true, - - async init() { - // Initialize with the store value - this.activeTab = Alpine.store('root').activeTab || 'agent'; - - // Watch store tab changes - this.$watch('$store.root.activeTab', (newTab) => { - if (typeof newTab !== 'undefined') { - this.activeTab = newTab; - localStorage.setItem('settingsActiveTab', newTab); - this.updateFilteredSections(); - } - }); - - // Load settings - await this.fetchSettings(); - this.updateFilteredSections(); - }, - - switchTab(tab) { - // Update our component state - this.activeTab = tab; - - // Update the store safely - const store = Alpine.store('root'); - if (store) { - store.activeTab = tab; - } - }, - - async fetchSettings() { - try { - this.isLoading = true; - const response = await fetchApi('/api/settings_get', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - } - }); - - if (response.ok) { - const data = await response.json(); - if (data && data.settings) { - this.settingsData = data.settings; - } else { - console.error('Invalid settings data format'); - } - } else { - console.error('Failed to fetch settings:', response.statusText); - } - } catch (error) { - console.error('Error fetching settings:', error); - } finally { - this.isLoading = false; - } - }, - - updateFilteredSections() { - // Filter sections based on active tab - if (this.activeTab === 'agent') { - this.filteredSections = this.settingsData.sections?.filter(section => - section.tab === 'agent' - ) || []; - } else if (this.activeTab === 'external') { - this.filteredSections = this.settingsData.sections?.filter(section => - section.tab === 'external' - ) || []; - } else if (this.activeTab === 'developer') { - this.filteredSections = this.settingsData.sections?.filter(section => - section.tab === 'developer' - ) || []; - } else if (this.activeTab === 'mcp') { - this.filteredSections = this.settingsData.sections?.filter(section => - section.tab === 'mcp' - ) || []; - } else if (this.activeTab === 'backup') { - this.filteredSections = this.settingsData.sections?.filter(section => - section.tab === 'backup' - ) || []; - } else { - // For any other tab, show nothing since those tabs have custom UI - this.filteredSections = []; - } - }, - - async saveSettings() { - try 
{ - // First validate - for (const section of this.settingsData.sections) { - for (const field of section.fields) { - if (field.required && (!field.value || field.value.trim() === '')) { - showToast(`${field.title} in ${section.title} is required`, 'error'); - return; - } - } - } - - // Prepare data - const formData = {}; - for (const section of this.settingsData.sections) { - for (const field of section.fields) { - formData[field.id] = field.value; - } - } - - // Send request - const response = await fetchApi('/api/settings_save', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify(formData) - }); - - if (response.ok) { - showToast('Settings saved successfully', 'success'); - // Refresh settings - await this.fetchSettings(); - } else { - const errorData = await response.json(); - throw new Error(errorData.error || 'Failed to save settings'); - } - } catch (error) { - console.error('Error saving settings:', error); - showToast('Failed to save settings: ' + error.message, 'error'); - } - }, - - // Handle special button field actions - handleFieldButton(field) { - if (field.action === 'test_connection') { - this.testConnection(field); - } else if (field.action === 'reveal_token') { - this.revealToken(field); - } else if (field.action === 'generate_token') { - this.generateToken(field); - } else { - console.warn('Unknown button action:', field.action); - } - }, - - // Test API connection - async testConnection(field) { - try { - field.testResult = 'Testing...'; - field.testStatus = 'loading'; - - // Find the API key field - let apiKey = ''; - for (const section of this.settingsData.sections) { - for (const f of section.fields) { - if (f.id === field.target) { - apiKey = f.value; - break; - } - } - } - - if (!apiKey) { - throw new Error('API key is required'); - } - - // Send test request - const response = await fetchApi('/api/test_connection', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - service: field.service, - api_key: apiKey - }) - }); - - const data = await response.json(); - - if (response.ok && data.success) { - field.testResult = 'Connection successful!'; - field.testStatus = 'success'; - } else { - throw new Error(data.error || 'Connection failed'); - } - } catch (error) { - console.error('Connection test failed:', error); - field.testResult = `Failed: ${error.message}`; - field.testStatus = 'error'; - } - }, - - // Reveal token temporarily - revealToken(field) { - // Find target field - for (const section of this.settingsData.sections) { - for (const f of section.fields) { - if (f.id === field.target) { - // Toggle field type - f.type = f.type === 'password' ? 'text' : 'password'; - - // Update button text - field.value = f.type === 'password' ? 
'Show' : 'Hide'; - - break; - } - } - } - }, - - // Generate random token - generateToken(field) { - // Find target field - for (const section of this.settingsData.sections) { - for (const f of section.fields) { - if (f.id === field.target) { - // Generate random token - const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; - let token = ''; - for (let i = 0; i < 32; i++) { - token += chars.charAt(Math.floor(Math.random() * chars.length)); - } - - // Set field value - f.value = token; - break; - } - } - } - }, - - closeModal() { - // Stop scheduler polling before closing the modal - const schedulerElement = document.querySelector('[x-data="schedulerSettings"]'); - if (schedulerElement) { - const schedulerData = Alpine.$data(schedulerElement); - if (schedulerData && typeof schedulerData.stopPolling === 'function') { - console.log('Stopping scheduler polling on modal close'); - schedulerData.stopPolling(); - } - } - - this.$store.root.isOpen = false; - } - }; - }); -}); - -// Show toast notification - now uses new notification system -function showToast(message, type = 'info') { - // Use new frontend notification system based on type - if (window.Alpine && window.Alpine.store && window.Alpine.store('notificationStore')) { - const store = window.Alpine.store('notificationStore'); - switch (type.toLowerCase()) { - case 'error': - return store.frontendError(message, "Settings", 5); - case 'success': - return store.frontendInfo(message, "Settings", 3); - case 'warning': - return store.frontendWarning(message, "Settings", 4); - case 'info': - default: - return store.frontendInfo(message, "Settings", 3); - } - } else { - // Fallback if Alpine/store not ready - console.log(`SETTINGS ${type.toUpperCase()}: ${message}`); - return null; - } -} diff --git a/webui/js/shortcuts.js b/webui/js/shortcuts.js new file mode 100644 index 0000000000..3043d1d46b --- /dev/null +++ b/webui/js/shortcuts.js @@ -0,0 +1,36 @@ +import { store as chatsStore } from "/components/sidebar/chats/chats-store.js"; +import { callJsonApi } from "/js/api.js"; +import * as modals from "/js/modals.js"; +import { + NotificationType, + NotificationPriority, + store as notificationStore, +} from "/components/notifications/notification-store.js"; + +// shortcuts utils for convenience + +// api +export { callJsonApi }; + +// notifications +export { NotificationType, NotificationPriority }; +export const frontendNotification = + notificationStore.frontendNotification.bind(notificationStore); + +// chat context +export function getCurrentContextId() { + return chatsStore.getSelectedChatId(); +} + +export function getCurrentContext(){ + return chatsStore.getSelectedContext(); +} + +// modals +export function openModal(modalPath) { + return modals.openModal(modalPath); +} + +export function closeModal(modalPath = null) { + return modals.closeModal(modalPath); +} diff --git a/webui/js/time-utils.js b/webui/js/time-utils.js index 9baba7fa69..0850ff04f2 100644 --- a/webui/js/time-utils.js +++ b/webui/js/time-utils.js @@ -69,3 +69,23 @@ export function formatDateTime(utcIsoString, format = 'full') { export function getUserTimezone() { return Intl.DateTimeFormat().resolvedOptions().timeZone; } + +/** + * Format a duration in milliseconds to a human-readable string + * @param {number} durationMs - Duration in milliseconds + * @returns {string} Formatted duration (e.g., '45s', '2m30s') + */ +export function formatDuration(durationMs) { + if (durationMs == null || durationMs < 0) return '0s'; + + // Round total seconds 
first to avoid "1m60s" when seconds round up to 60 + const totalSecs = Math.round(durationMs / 1000); + + if (totalSecs < 60) { + return `${totalSecs}s`; + } + + const mins = Math.floor(totalSecs / 60); + const secs = totalSecs % 60; + return `${mins}m${secs}s`; +} diff --git a/webui/public/update_checker.svg b/webui/public/update_checker.svg new file mode 100644 index 0000000000..d3c7c05e7f --- /dev/null +++ b/webui/public/update_checker.svg @@ -0,0 +1 @@ + \ No newline at end of file
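For reference, a minimal usage sketch (not part of the diff) showing the behaviour of the formatDuration helper added to webui/js/time-utils.js above; the "/js/time-utils.js" import path is an assumption based on how the other webui modules are referenced in shortcuts.js.

// Usage sketch for formatDuration; import path assumed from the webui module layout.
import { formatDuration } from "/js/time-utils.js";

formatDuration(45_000);   // "45s"
formatDuration(150_000);  // "2m30s"
formatDuration(119_750);  // "2m0s" (119.75s rounds to 120s, avoiding "1m60s")
formatDuration(-5);       // "0s"  (null or negative durations fall back to "0s")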