diff --git a/AGENTS.md b/AGENTS.md index 240d977b4..8f5684dcb 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,2 +1,7 @@ -Present links to images and other content in Markdown links -- for example: ![Image](https://link.to.image) +## fast-agent contributor notes +- Use `uv run` for repo scripts and examples. +- Always run `uv run scripts/lint.py` and `uv run scripts/typecheck.py` after code changes. +- Keep examples under `examples/` in sync with packaged resources when relevant. +- Prefer small, focused diffs; avoid reformatting unrelated code. +- Use Markdown links for images and other content (example: `![Image](https://link.to.image)`). diff --git a/examples/mcp/elicitations/elicitation_forms_server.py b/examples/mcp/elicitations/elicitation_forms_server.py index bd8871d8b..f38014e42 100644 --- a/examples/mcp/elicitations/elicitation_forms_server.py +++ b/examples/mcp/elicitations/elicitation_forms_server.py @@ -7,7 +7,7 @@ import logging import sys -from typing import Optional, TypedDict +from typing import Optional, TypedDict, cast from mcp import ReadResourceResult from mcp.server.elicitation import ( @@ -51,7 +51,10 @@ def _create_enum_schema_options(data: dict[str, str]) -> list[TitledEnumOption]: >>> _create_enum_schema_options({"dark": "Dark Mode", "light": "Light Mode"}) [{"const": "dark", "title": "Dark Mode"}, {"const": "light", "title": "Light Mode"}] """ - return [{"const": k, "title": v} for k, v in data.items()] + options: list[TitledEnumOption] = [ + cast("TitledEnumOption", {"const": k, "title": v}) for k, v in data.items() + ] + return options @mcp.resource(uri="elicitation://event-registration") diff --git a/examples/mcp/elicitations/game_character_handler.py b/examples/mcp/elicitations/game_character_handler.py index 8606b95a5..e32a107ce 100644 --- a/examples/mcp/elicitations/game_character_handler.py +++ b/examples/mcp/elicitations/game_character_handler.py @@ -33,8 +33,9 @@ async def game_character_elicitation_handler( """Custom handler that creates an interactive character creation experience.""" logger.info(f"Game character elicitation handler called: {params.message}") - if params.requestedSchema: - properties = params.requestedSchema.get("properties", {}) + requested_schema = getattr(params, "requestedSchema", None) + if requested_schema: + properties = requested_schema.get("properties", {}) content: dict[str, Any] = {} console.print("\n[bold magenta]🎮 Character Creation Studio 🎮[/bold magenta]\n") diff --git a/examples/tensorzero/simple_agent.py b/examples/tensorzero/simple_agent.py index b19c849a0..6e666f826 100644 --- a/examples/tensorzero/simple_agent.py +++ b/examples/tensorzero/simple_agent.py @@ -18,7 +18,7 @@ async def main(): async with fast.run() as agent_app: agent_name = "default" print("\nStarting interactive session with template_vars set via decorator...") - await agent_app.interactive(agent=agent_name) + await agent_app.interactive(agent_name=agent_name) if __name__ == "__main__": diff --git a/examples/workflows-md/agents_as_tools_extended/London-Project-Manager.md b/examples/workflows-md/agents_as_tools_extended/London-Project-Manager.md new file mode 100644 index 000000000..1d61e95ed --- /dev/null +++ b/examples/workflows-md/agents_as_tools_extended/London-Project-Manager.md @@ -0,0 +1,8 @@ +--- +type: agent +name: London-Project-Manager +servers: +- time +- fetch +--- +You are a London project manager. For each given topic, get the current local time in London and a brief, project-relevant news summary using the 'time' and 'fetch' MCP servers. 
If a source returns HTTP 403 or is blocked by robots.txt, try up to five alternative public sources before giving up and clearly state any remaining access limits. Hint: BBC: https://www.bbc.com/ and FT: https://www.ft.com/ diff --git a/examples/workflows-md/agents_as_tools_extended/NY-Project-Manager.md b/examples/workflows-md/agents_as_tools_extended/NY-Project-Manager.md new file mode 100644 index 000000000..147ab5027 --- /dev/null +++ b/examples/workflows-md/agents_as_tools_extended/NY-Project-Manager.md @@ -0,0 +1,8 @@ +--- +type: agent +name: NY-Project-Manager +servers: +- time +- fetch +--- +You are a New York project manager. For each given topic, get the current local time in New York and a brief, project-relevant news summary using the 'time' and 'fetch' MCP servers. If a source returns HTTP 403 or is blocked by robots.txt, try up to five alternative public sources before giving up and clearly state any remaining access limits. Hint: Fast-Agent site: https://fast-agent.ai diff --git a/examples/workflows-md/agents_as_tools_extended/PMO-orchestrator.md b/examples/workflows-md/agents_as_tools_extended/PMO-orchestrator.md new file mode 100644 index 000000000..aeb1b658f --- /dev/null +++ b/examples/workflows-md/agents_as_tools_extended/PMO-orchestrator.md @@ -0,0 +1,13 @@ +--- +type: agent +name: PMO-orchestrator +default: true +agents: +- NY-Project-Manager +- London-Project-Manager +history_mode: scratch +max_parallel: 128 +child_timeout_sec: 120 +max_display_instances: 20 +--- +Get project updates from the New York and London project managers. Ask NY-Project-Manager three times about different projects: Anthropic, evalstate/fast-agent, and OpenAI, and London-Project-Manager for economics review. Return a brief, concise combined summary with clear city/time/topic labels. diff --git a/examples/workflows-md/agents_as_tools_simple/London-Project-Manager.md b/examples/workflows-md/agents_as_tools_simple/London-Project-Manager.md new file mode 100644 index 000000000..b09df8df1 --- /dev/null +++ b/examples/workflows-md/agents_as_tools_simple/London-Project-Manager.md @@ -0,0 +1,7 @@ +--- +type: agent +name: London-Project-Manager +servers: +- time +--- +Return London time + timezone, plus a one-line news update. diff --git a/examples/workflows-md/agents_as_tools_simple/NY-Project-Manager.md b/examples/workflows-md/agents_as_tools_simple/NY-Project-Manager.md new file mode 100644 index 000000000..e6cdd5466 --- /dev/null +++ b/examples/workflows-md/agents_as_tools_simple/NY-Project-Manager.md @@ -0,0 +1,7 @@ +--- +type: agent +name: NY-Project-Manager +servers: +- time +--- +Return NY time + timezone, plus a one-line project status. diff --git a/examples/workflows-md/agents_as_tools_simple/PMO-orchestrator.md b/examples/workflows-md/agents_as_tools_simple/PMO-orchestrator.md new file mode 100644 index 000000000..3775a60c0 --- /dev/null +++ b/examples/workflows-md/agents_as_tools_simple/PMO-orchestrator.md @@ -0,0 +1,9 @@ +--- +type: agent +name: PMO-orchestrator +default: true +agents: +- NY-Project-Manager +- London-Project-Manager +--- +Get reports. Always use one tool call per project/news. Responsibilities: NY projects: [OpenAI, Fast-Agent, Anthropic]. London news: [Economics, Art, Culture]. Aggregate results and add a one-line PMO summary. 
diff --git a/examples/workflows-md/chaining/post_writer.md b/examples/workflows-md/chaining/post_writer.md new file mode 100644 index 000000000..f83098677 --- /dev/null +++ b/examples/workflows-md/chaining/post_writer.md @@ -0,0 +1,9 @@ +--- +type: chain +name: post_writer +default: true +sequence: +- url_fetcher +- social_media +--- +Chain processes requests through a series of agents in sequence, the output of each agent is passed to the next. diff --git a/examples/workflows-md/chaining/social_media.md b/examples/workflows-md/chaining/social_media.md new file mode 100644 index 000000000..4d729fecc --- /dev/null +++ b/examples/workflows-md/chaining/social_media.md @@ -0,0 +1,7 @@ +--- +type: agent +name: social_media +--- + + Write a 280 character social media post for any given text. + Respond only with the post, never use hashtags. diff --git a/examples/workflows-md/chaining/url_fetcher.md b/examples/workflows-md/chaining/url_fetcher.md new file mode 100644 index 000000000..a7ef43192 --- /dev/null +++ b/examples/workflows-md/chaining/url_fetcher.md @@ -0,0 +1,7 @@ +--- +type: agent +name: url_fetcher +servers: +- fetch +--- +Given a URL, provide a complete and comprehensive summary diff --git a/examples/workflows-md/evaluator/cover_letter_writer.md b/examples/workflows-md/evaluator/cover_letter_writer.md new file mode 100644 index 000000000..04f7703e3 --- /dev/null +++ b/examples/workflows-md/evaluator/cover_letter_writer.md @@ -0,0 +1,13 @@ +--- +type: evaluator_optimizer +name: cover_letter_writer +generator: generator +evaluator: evaluator +min_rating: EXCELLENT +max_refinements: 3 +refinement_instruction: null +--- + + You implement an iterative refinement process where content is generated, + evaluated for quality, and then refined based on specific feedback until + it reaches an acceptable quality standard. diff --git a/examples/workflows-md/evaluator/evaluator.md b/examples/workflows-md/evaluator/evaluator.md new file mode 100644 index 000000000..e376de65e --- /dev/null +++ b/examples/workflows-md/evaluator/evaluator.md @@ -0,0 +1,21 @@ +--- +type: agent +name: evaluator +model: o3-mini.medium +--- +Evaluate the following response based on the criteria below: + 1. Clarity: Is the language clear, concise, and grammatically correct? + 2. Specificity: Does the response include relevant and concrete details tailored to the job description? + 3. Relevance: Does the response align with the prompt and avoid unnecessary information? + 4. Tone and Style: Is the tone professional and appropriate for the context? + 5. Persuasiveness: Does the response effectively highlight the candidate's value? + 6. Grammar and Mechanics: Are there any spelling or grammatical issues? + 7. Feedback Alignment: Has the response addressed feedback from previous iterations? + + For each criterion: + - Provide a rating (EXCELLENT, GOOD, FAIR, or POOR). + - Offer specific feedback or suggestions for improvement. + + Summarize your evaluation as a structured response with: + - Overall quality rating. + - Specific feedback and areas for improvement. diff --git a/examples/workflows-md/evaluator/generator.md b/examples/workflows-md/evaluator/generator.md new file mode 100644 index 000000000..67746f012 --- /dev/null +++ b/examples/workflows-md/evaluator/generator.md @@ -0,0 +1,10 @@ +--- +type: agent +name: generator +model: gpt-5-nano.low +servers: +- fetch +--- +You are a career coach specializing in cover letter writing. 
+ You are tasked with generating a compelling cover letter given the job posting, + candidate details, and company information. Tailor the response to the company and job requirements. diff --git a/examples/workflows-md/fastagent.config.yaml b/examples/workflows-md/fastagent.config.yaml new file mode 100644 index 000000000..c73a5f2e6 --- /dev/null +++ b/examples/workflows-md/fastagent.config.yaml @@ -0,0 +1,26 @@ +# Please edit this configuration file to match your environment (on Windows). +# Examples in comments below - check/change the paths. +# +# + +logger: + type: file + level: error + truncate_tools: true + +mcp: + servers: + filesystem: + # On windows update the command and arguments to use `node` and the absolute path to the server. + # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally. + # Use `npm -g root` to find the global node_modules path.` + # command: "node" + # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."] + command: "npx" + args: ["-y", "@modelcontextprotocol/server-filesystem", "."] + fetch: + command: "uvx" + args: ["mcp-server-fetch"] + time: + command: "uvx" + args: ["mcp-server-time"] diff --git a/examples/workflows-md/hf-api-agent/hf-api-agent.md b/examples/workflows-md/hf-api-agent/hf-api-agent.md new file mode 100644 index 000000000..773ba54f0 --- /dev/null +++ b/examples/workflows-md/hf-api-agent/hf-api-agent.md @@ -0,0 +1,393 @@ +--- +type: agent +name: hf-user +function_tools: + - hf_api_tool.py:hf_api_request +model: gpt-oss +default: true +description: Use this tool to find out information about Users, Organizations and Pull Requests +--- +Hugging Face Hub Methods: How to Call (User/Org Focus) +====================================================== + +Scope +----- +This card summarizes the curated user/organization-related methods and how to call +them via the hf_api_request tool (no shell usage). + +References: +- Curated method list (embedded below from scripts/hf_api_methods.txt) +- REST endpoints: scripts/hf_api_endpoints.txt +- Tool: hf_api_request (this card's function tool) + +Prereqs +------- +- HF_TOKEN env var (or ~/.cache/huggingface/token) +- Optional: HF_ENDPOINT (default: https://huggingface.co) +- Optional: HF_MAX_RESULTS (default: 20) + +Preferred: hf_api_request tool +------------------------------ +Tool call pattern: +- GET: hf_api_request(endpoint="/whoami-v2") +- GET with params: hf_api_request(endpoint="/users/{username}/likes") +- POST: hf_api_request(endpoint="/.../comment", method="POST", json_body={...}) + +Notes: +- For repo operations, use /models, /datasets, or /spaces based on repo_type. +- Only GET/POST are supported by this tool. PATCH/DELETE are not supported. +- Avoid destructive operations unless the user explicitly confirms. 
+ +USER DATA +--------- +- whoami + tool: hf_api_request(endpoint="/whoami-v2") + +- activity (HTML scrape, not a public API endpoint) + tool: not available (HTML scrape is not supported by hf_api_request) + +- get_user_overview + tool: hf_api_request(endpoint="/users/{username}/overview") + +- list_liked_repos + tool: hf_api_request(endpoint="/users/{username}/likes") + +- get_token_permission + tool: not available (use /whoami-v2 and check auth.accessToken.role) + +USER NETWORK +------------ +- list_user_followers + tool: hf_api_request(endpoint="/users/{username}/followers") + +- list_user_following + tool: hf_api_request(endpoint="/users/{username}/following") + +ORGANIZATIONS +------------- +- get_organization_overview + tool: hf_api_request(endpoint="/organizations/{organization}/overview") + +- list_organization_members + tool: hf_api_request(endpoint="/organizations/{organization}/members") + +- list_organization_followers + tool: hf_api_request(endpoint="/organizations/{organization}/followers") + +DISCUSSIONS & PULL REQUESTS +--------------------------- +- get_repo_discussions + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/discussions", + params={"type": "pr|discussion", "author": "", "status": "open|closed"} + ) + +- get_discussion_details + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/discussions/{num}", + params={"diff": 1} + ) + +- create_discussion + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/discussions", + method="POST", + json_body={"title": "...", "description": "...", "pullRequest": false} + ) + +- create_pull_request + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/discussions", + method="POST", + json_body={"title": "...", "description": "...", "pullRequest": true} + ) + +- comment_discussion + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/discussions/{num}/comment", + method="POST", + json_body={"comment": "..."} + ) + +- edit_discussion_comment + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/discussions/{num}/comment/{comment_id}/edit", + method="POST", + json_body={"content": "..."} + ) + +- hide_discussion_comment (destructive) + tool: only with explicit confirmation: + hf_api_request( + endpoint="/{repo_type}s/{repo_id}/discussions/{num}/comment/{comment_id}/hide", + method="POST" + ) + +- change_discussion_status + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/discussions/{num}/status", + method="POST", + json_body={"status": "open|closed", "comment": "..."} + ) + +ACCESS REQUESTS (GATED REPOS) +----------------------------- +- list_pending_access_requests + tool: hf_api_request(endpoint="/{repo_type}s/{repo_id}/user-access-request/pending") + +- list_accepted_access_requests + tool: hf_api_request(endpoint="/{repo_type}s/{repo_id}/user-access-request/accepted") + +- list_rejected_access_requests + tool: hf_api_request(endpoint="/{repo_type}s/{repo_id}/user-access-request/rejected") + +- accept_access_request + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/user-access-request/handle", + method="POST", + json_body={"user": "...", "status": "accepted"} + ) + +- cancel_access_request + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/user-access-request/handle", + method="POST", + json_body={"user": "...", "status": "pending"} + ) + +- reject_access_request (destructive) + tool: only with explicit confirmation: + hf_api_request( + endpoint="/{repo_type}s/{repo_id}/user-access-request/handle", + method="POST", + json_body={"user": "...", "status": "rejected", 
"rejectionReason": "..."} + ) + +- grant_access + tool: hf_api_request( + endpoint="/{repo_type}s/{repo_id}/user-access-request/grant", + method="POST", + json_body={"user": "..."} + ) + +USER COLLECTIONS +---------------- +- list_collections + tool: hf_api_request(endpoint="/collections", params={"owner": ""}) + +- get_collection + tool: hf_api_request(endpoint="/collections/{slug}") + +- create_collection + tool: hf_api_request( + endpoint="/collections", + method="POST", + json_body={"title": "...", "namespace": "", "description": "...", "private": false} + ) + +- delete_collection + tool: DELETE not supported by hf_api_request + +- add_collection_item + tool: hf_api_request( + endpoint="/collections/{slug}/items", + method="POST", + json_body={"item": {"id": "...", "type": "model|dataset|space|paper"}, "note": "..."} + ) + +- delete_collection_item + tool: DELETE not supported by hf_api_request + +- update_collection_item + tool: PATCH not supported by hf_api_request + +- update_collection_metadata + tool: PATCH not supported by hf_api_request + +USER INTERACTIONS +----------------- +- like + tool: not available (Hub disables like API) + +- unlike + tool: DELETE not supported by hf_api_request + +- auth_check + tool: hf_api_request(endpoint="/{repo_type}s/{repo_id}/auth-check") + +Direct REST usage example +------------------------- + hf_api_request(endpoint="/organizations//overview") + +See scripts/hf_api_endpoints.txt for full endpoint details and expected request bodies. + +Curated HfApi Methods: User & Organization Data, Discussions & Interactions +=========================================================================== +Note: Some methods map to PATCH/DELETE endpoints, which are not supported by hf_api_request. +Use these as reference unless the tool is extended. + +34 methods selected from 126 total HfApi methods + + +USER DATA (4 methods) +================================================================================ + +get_user_overview(username: str, token: ...) -> User +-------------------------------------------------------------------------------- +Get an overview of a user on the Hub. + +list_liked_repos(user: Optional[str] = None, *, token: ...) -> UserLikes +-------------------------------------------------------------------------------- +List all public repos liked by a user on huggingface.co. + +whoami(token: ...) -> Dict +-------------------------------------------------------------------------------- +Call HF API to know "whoami". + +get_token_permission(token: ...) -> Literal['read', 'write', 'fineGrained', None] +-------------------------------------------------------------------------------- +Check if a given token is valid and return its permissions. + + +USER NETWORK (2 methods) +================================================================================ + +list_user_followers(username: str, token: ...) -> Iterable[User] +-------------------------------------------------------------------------------- +Get the list of followers of a user on the Hub. + +list_user_following(username: str, token: ...) -> Iterable[User] +-------------------------------------------------------------------------------- +Get the list of users followed by a user on the Hub. + + +ORGANIZATIONS (3 methods) +================================================================================ + +get_organization_overview(organization: str, token: ...) 
-> Organization +-------------------------------------------------------------------------------- +Get an overview of an organization on the Hub. + +list_organization_members(organization: str, token: ...) -> Iterable[User] +-------------------------------------------------------------------------------- +List of members of an organization on the Hub. + +list_organization_followers(organization: str, token: ...) -> Iterable[User] +-------------------------------------------------------------------------------- +List followers of an organization on the Hub. + + +DISCUSSIONS & PULL REQUESTS (8 methods) +================================================================================ + +create_discussion(repo_id: str, title: str, *, token: ..., description: ..., repo_type: ..., pull_request: bool = False) -> DiscussionWithDetails +-------------------------------------------------------------------------------- +Creates a Discussion or Pull Request. + +create_pull_request(repo_id: str, title: str, *, token: ..., description: ..., repo_type: ...) -> DiscussionWithDetails +-------------------------------------------------------------------------------- +Creates a Pull Request. Pull Requests created programmatically will be in "draft" status. + +get_discussion_details(repo_id: str, discussion_num: int, *, repo_type: ..., token: ...) -> DiscussionWithDetails +-------------------------------------------------------------------------------- +Fetches a Discussion's / Pull Request's details from the Hub. + +get_repo_discussions(repo_id: str, *, author: ..., discussion_type: ..., discussion_status: ..., repo_type: ..., token: ...) -> Iterator[Discussion] +-------------------------------------------------------------------------------- +Fetches Discussions and Pull Requests for the given repo. + +comment_discussion(repo_id: str, discussion_num: int, comment: str, *, token: ..., repo_type: ...) -> DiscussionComment +-------------------------------------------------------------------------------- +Creates a new comment on the given Discussion. + +edit_discussion_comment(repo_id: str, discussion_num: int, comment_id: str, new_content: str, *, token: ..., repo_type: ...) -> DiscussionComment +-------------------------------------------------------------------------------- +Edits a comment on a Discussion / Pull Request. + +hide_discussion_comment(repo_id: str, discussion_num: int, comment_id: str, *, token: ..., repo_type: ...) -> DiscussionComment +-------------------------------------------------------------------------------- +Hides a comment on a Discussion / Pull Request. + +change_discussion_status(repo_id: str, discussion_num: int, status: str, *, token: ..., repo_type: ..., comment: ...) -> Discussion +-------------------------------------------------------------------------------- +Changes the status of a Discussion or Pull Request. + + +ACCESS REQUESTS (GATED REPOS) (6 methods) +================================================================================ + +list_pending_access_requests(repo_id: str, *, token: ..., repo_type: ...) -> List[AccessRequest] +-------------------------------------------------------------------------------- +List pending access requests for a gated repo. + +list_accepted_access_requests(repo_id: str, *, token: ..., repo_type: ...) -> List[AccessRequest] +-------------------------------------------------------------------------------- +List accepted access requests for a gated repo. + +list_rejected_access_requests(repo_id: str, *, token: ..., repo_type: ...) 
-> List[AccessRequest] +-------------------------------------------------------------------------------- +List rejected access requests for a gated repo. + +accept_access_request(repo_id: str, user: str, *, token: ..., repo_type: ...) -> None +-------------------------------------------------------------------------------- +Accept access request to a gated repo. + +reject_access_request(repo_id: str, user: str, *, token: ..., repo_type: ..., rejection_reason: ...) -> None +-------------------------------------------------------------------------------- +Reject access request to a gated repo. + +grant_access(repo_id: str, user: str, *, token: ..., repo_type: ...) -> None +-------------------------------------------------------------------------------- +Grant access to a gated repo without an access request. + + +USER COLLECTIONS (8 methods) +================================================================================ + +get_collection(collection_slug: str, *, token: ...) -> Collection +-------------------------------------------------------------------------------- +Get a collection's details from the Hub. + +create_collection(title: str, *, namespace: ..., description: ..., private: ..., token: ...) -> Collection +-------------------------------------------------------------------------------- +Create a new collection on the Hub. + +list_collections(*, owner: ..., item: ..., sort: ..., limit: ..., token: ...) -> Iterable[Collection] +-------------------------------------------------------------------------------- +List collections on the Huggingface Hub, given some filters. + +delete_collection(collection_slug: str, *, missing_ok: bool = False, token: ...) -> None +-------------------------------------------------------------------------------- +Delete a collection on the Hub. + +add_collection_item(collection_slug: str, item_id: str, item_type: CollectionItemType_T, *, note: ..., exists_ok: bool = False, token: ...) -> Collection +-------------------------------------------------------------------------------- +Add an item to a collection on the Hub. + +delete_collection_item(collection_slug: str, item_object_id: str, *, missing_ok: bool = False, token: ...) -> None +-------------------------------------------------------------------------------- +Delete an item from a collection. + +update_collection_item(collection_slug: str, item_object_id: str, *, note: ..., position: ..., token: ...) -> None +-------------------------------------------------------------------------------- +Update an item in a collection. + +update_collection_metadata(collection_slug: str, *, title: ..., description: ..., position: ..., private: ..., theme: ..., token: ...) -> Collection +-------------------------------------------------------------------------------- +Update the metadata of a collection on the Hub. + + +USER INTERACTIONS (3 methods) +================================================================================ + +like(repo_id: str, *, token: ..., repo_type: ...) -> None +-------------------------------------------------------------------------------- +Like a given repo on the Hub (star). + +unlike(repo_id: str, *, token: ..., repo_type: ...) -> None +-------------------------------------------------------------------------------- +Unlike a given repo on the Hub (unstar). + +auth_check(repo_id: str, *, repo_type: ..., token: ...) 
-> None +-------------------------------------------------------------------------------- +Check if the provided user token has access to a specific repository on the Hugging Face Hub. diff --git a/examples/workflows-md/hf-api-agent/hf_api_tool.py b/examples/workflows-md/hf-api-agent/hf_api_tool.py new file mode 100644 index 000000000..9234c36b1 --- /dev/null +++ b/examples/workflows-md/hf-api-agent/hf_api_tool.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +import json +import os +from pathlib import Path +from typing import Any +from urllib.error import HTTPError, URLError +from urllib.parse import urlencode +from urllib.request import Request, urlopen + +DEFAULT_MAX_RESULTS = 20 +DEFAULT_TIMEOUT_SEC = 30 + + +def _load_token() -> str | None: + token = os.getenv("HF_TOKEN") + if token: + return token + + token_path = Path.home() / ".cache" / "huggingface" / "token" + if token_path.exists(): + token_value = token_path.read_text(encoding="utf-8").strip() + return token_value or None + + return None + + +def _max_results_from_env() -> int: + raw = os.getenv("HF_MAX_RESULTS") + if not raw: + return DEFAULT_MAX_RESULTS + try: + value = int(raw) + except ValueError: + return DEFAULT_MAX_RESULTS + return value if value > 0 else DEFAULT_MAX_RESULTS + + +def _normalize_endpoint(endpoint: str) -> str: + if endpoint.startswith("http://") or endpoint.startswith("https://"): + raise ValueError("Endpoint must be a path relative to /api, not a full URL.") + endpoint = endpoint.strip() + if not endpoint: + raise ValueError("Endpoint must be a non-empty string.") + if not endpoint.startswith("/"): + endpoint = f"/{endpoint}" + return endpoint + + +def _normalize_params(params: dict[str, Any] | None) -> dict[str, Any]: + if not params: + return {} + normalized: dict[str, Any] = {} + for key, value in params.items(): + if value is None: + continue + if isinstance(value, (list, tuple)): + normalized[key] = [str(item) for item in value] + else: + normalized[key] = str(value) + return normalized + + +def _build_url(endpoint: str, params: dict[str, Any] | None) -> str: + base = os.getenv("HF_ENDPOINT", "https://huggingface.co").rstrip("/") + url = f"{base}/api{_normalize_endpoint(endpoint)}" + normalized_params = _normalize_params(params) + if normalized_params: + url = f"{url}?{urlencode(normalized_params, doseq=True)}" + return url + + +def hf_api_request( + endpoint: str, + method: str = "GET", + params: dict[str, Any] | None = None, + json_body: dict[str, Any] | None = None, + max_results: int | None = None, +) -> dict[str, Any]: + """ + Call the Hugging Face Hub API (GET/POST only). + + Args: + endpoint: API endpoint relative to /api (e.g. "/whoami-v2"). + method: HTTP method (GET or POST). + params: Optional query parameters. + json_body: Optional JSON payload for POST requests. + max_results: Max results when response is a list (defaults to HF_MAX_RESULTS). + + Returns: + A dict with the response data and request metadata. 
+ """ + method_upper = method.upper() + if method_upper not in {"GET", "POST"}: + raise ValueError("Only GET and POST are allowed for hf_api_request.") + + if method_upper == "GET" and json_body is not None: + raise ValueError("GET requests do not accept json_body.") + + url = _build_url(endpoint, params) + + headers = { + "Accept": "application/json", + } + token = _load_token() + if token: + headers["Authorization"] = f"Bearer {token}" + + data = None + if method_upper == "POST": + headers["Content-Type"] = "application/json" + data = json.dumps(json_body or {}).encode("utf-8") + + request = Request(url, headers=headers, data=data, method=method_upper) + + try: + with urlopen(request, timeout=DEFAULT_TIMEOUT_SEC) as response: + raw = response.read() + status_code = response.status + except HTTPError as exc: + error_body = exc.read().decode("utf-8", errors="replace") + raise RuntimeError( + f"HF API error {exc.code} for {url}: {error_body}" + ) from exc + except URLError as exc: + raise RuntimeError(f"HF API request failed for {url}: {exc}") from exc + + try: + payload = json.loads(raw) + except json.JSONDecodeError: + payload = raw.decode("utf-8", errors="replace") + + if isinstance(payload, list): + limit = max_results if max_results is not None else _max_results_from_env() + payload = payload[: max(limit, 0)] + + return { + "url": url, + "status": status_code, + "data": payload, + } diff --git a/examples/workflows-md/human_input/default.md b/examples/workflows-md/human_input/default.md new file mode 100644 index 000000000..498a7ad0e --- /dev/null +++ b/examples/workflows-md/human_input/default.md @@ -0,0 +1,6 @@ +--- +type: agent +name: default +human_input: true +--- +An AI agent that assists with basic tasks. Request Human Input when needed - for example if being asked to predict a number sequence or pretending to take pizza orders. diff --git a/examples/workflows-md/maker/classifier.md b/examples/workflows-md/maker/classifier.md new file mode 100644 index 000000000..f21e60494 --- /dev/null +++ b/examples/workflows-md/maker/classifier.md @@ -0,0 +1,14 @@ +--- +type: agent +name: classifier +model: claude-3-haiku-20240307 +--- +You are a customer support intent classifier. +Classify the customer message into exactly one of: COMPLAINT, QUESTION, REQUEST, FEEDBACK. +Respond with ONLY the single word classification, nothing else. + +Examples: +- "This product is broken!" → COMPLAINT +- "How do I reset my password?" → QUESTION +- "Please cancel my subscription" → REQUEST +- "Just wanted to say I love the new feature" → FEEDBACK diff --git a/examples/workflows-md/maker/reliable_classifier.md b/examples/workflows-md/maker/reliable_classifier.md new file mode 100644 index 000000000..17dfe15e5 --- /dev/null +++ b/examples/workflows-md/maker/reliable_classifier.md @@ -0,0 +1,14 @@ +--- +type: MAKER +name: reliable_classifier +worker: classifier +k: 3 +max_samples: 10 +match_strategy: normalized +red_flag_max_length: 20 +--- + + MAKER: Massively decomposed Agentic processes with K-voting Error Reduction. + Implements statistical error correction through voting consensus. + Multiple samples are drawn and the first response to achieve a k-vote + margin wins, ensuring high reliability even with cost-effective models.
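The reliable_classifier card above relies on the "first response to achieve a k-vote margin wins" rule described in its body. The sketch below illustrates only that voting rule; the `sample` callable and the upper-casing stand-in for the "normalized" match strategy are assumptions for illustration, not fast-agent's MAKER implementation.

```python
# Minimal sketch of the k-vote-margin rule described in reliable_classifier.md.
# The `sample` callable and the upper-casing normalization are illustrative
# assumptions, not the fast-agent implementation.
from collections import Counter
from typing import Callable, Optional


def k_margin_vote(sample: Callable[[], str], k: int = 3, max_samples: int = 10) -> Optional[str]:
    """Draw samples until one answer leads every other answer by at least k votes."""
    votes: Counter[str] = Counter()
    for _ in range(max_samples):
        votes[sample().strip().upper()] += 1  # crude stand-in for a "normalized" match
        leader, leader_count = votes.most_common(1)[0]
        runner_up = max((count for answer, count in votes.items() if answer != leader), default=0)
        if leader_count - runner_up >= k:
            return leader
    return None  # no consensus reached within the sample budget
```

With `k: 3` and `max_samples: 10` as in the card, a worker that answers unanimously converges after three samples, while a noisy worker either reaches a three-vote margin or returns no consensus within the budget.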
diff --git a/examples/workflows-md/orchestrator/author.md b/examples/workflows-md/orchestrator/author.md new file mode 100644 index 000000000..6b3efacce --- /dev/null +++ b/examples/workflows-md/orchestrator/author.md @@ -0,0 +1,9 @@ +--- +type: agent +name: author +servers: +- filesystem +--- +You are to role play a poorly skilled writer, + who makes frequent grammar, punctuation and spelling errors. You enjoy + writing short stories, but the narrative doesn't always make sense diff --git a/examples/workflows-md/orchestrator/finder.md b/examples/workflows-md/orchestrator/finder.md new file mode 100644 index 000000000..e06515167 --- /dev/null +++ b/examples/workflows-md/orchestrator/finder.md @@ -0,0 +1,12 @@ +--- +type: agent +name: finder +model: gpt-4.1 +servers: +- fetch +- filesystem +--- +You are an agent with access to the filesystem, + as well as the ability to fetch URLs. Your job is to identify + the closest match to a user's request, make the appropriate tool calls, + and return the URI and CONTENTS of the closest match. diff --git a/examples/workflows-md/orchestrator/orchestrate.md b/examples/workflows-md/orchestrator/orchestrate.md new file mode 100644 index 000000000..95bc0c772 --- /dev/null +++ b/examples/workflows-md/orchestrator/orchestrate.md @@ -0,0 +1,29 @@ +--- +type: iterative_planner +name: orchestrate +model: sonnet +agents: +- finder +- writer +- proofreader +plan_iterations: 5 +--- + +You are an expert planner, able to orchestrate complex tasks by breaking them down into +manageable steps, and delegating tasks to Agents. + +You work iteratively - given an Objective, you consider the current state of the plan and +decide the next step towards the goal. You document those steps and create clear instructions +for execution by the Agents, being specific about what you need to know to assess task completion. + +NOTE: A 'Planning Step' has a description, and a list of tasks that can be delegated +and executed in parallel. + +Agents have a 'description' describing their primary function, and a set of 'skills' that +represent Tools they can use in completing their function. + +The following Agents are available to you: + +{{agents}} + +You must specify the Agent name precisely when generating a Planning Step. diff --git a/examples/workflows-md/orchestrator/proofreader.md b/examples/workflows-md/orchestrator/proofreader.md new file mode 100644 index 000000000..7b122acbc --- /dev/null +++ b/examples/workflows-md/orchestrator/proofreader.md @@ -0,0 +1,10 @@ +--- +type: agent +name: proofreader +model: gpt-4.1 +servers: +- fetch +--- +"Review the short story for grammar, spelling, and punctuation errors. + Identify any awkward phrasing or structural issues that could improve clarity. + Provide detailed feedback on corrections. diff --git a/examples/workflows-md/orchestrator/writer.md b/examples/workflows-md/orchestrator/writer.md new file mode 100644 index 000000000..f426bed64 --- /dev/null +++ b/examples/workflows-md/orchestrator/writer.md @@ -0,0 +1,9 @@ +--- +type: agent +name: writer +servers: +- filesystem +--- +You are an agent that can write to the filesystem. + You are tasked with taking the user's input, addressing it, and + writing the result to disk in the appropriate location.
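The orchestrate.md card above defines a 'Planning Step' as a description plus a list of tasks that can be delegated to named agents and executed in parallel. A rough shape of that structure, with field names assumed for illustration rather than taken from fast-agent's planner:

```python
# Rough shape of the "Planning Step" described in orchestrate.md: a description
# plus delegated tasks that can run in parallel. Field names are assumptions for
# illustration, not fast-agent's actual planner schema.
from dataclasses import dataclass, field


@dataclass
class DelegatedTask:
    agent: str        # must match an available agent name exactly (e.g. "finder")
    instruction: str  # what the agent needs in order to complete and report the task


@dataclass
class PlanningStep:
    description: str
    tasks: list[DelegatedTask] = field(default_factory=list)  # executed in parallel
```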
diff --git a/examples/workflows-md/parallel/fact_checker.md b/examples/workflows-md/parallel/fact_checker.md new file mode 100644 index 000000000..a813e61cd --- /dev/null +++ b/examples/workflows-md/parallel/fact_checker.md @@ -0,0 +1,7 @@ +--- +type: agent +name: fact_checker +--- +Verify the factual consistency within the story. Identify any contradictions, + logical inconsistencies, or inaccuracies in the plot, character actions, or setting. + Highlight potential issues with reasoning or coherence. diff --git a/examples/workflows-md/parallel/grader.md b/examples/workflows-md/parallel/grader.md new file mode 100644 index 000000000..82c6581fe --- /dev/null +++ b/examples/workflows-md/parallel/grader.md @@ -0,0 +1,8 @@ +--- +type: agent +name: grader +--- +Compile the feedback from the Proofreader, Fact Checker, and Style Enforcer + into a structured report. Summarize key issues and categorize them by type. + Provide actionable recommendations for improving the story, + and give an overall grade based on the feedback. diff --git a/examples/workflows-md/parallel/parallel.md b/examples/workflows-md/parallel/parallel.md new file mode 100644 index 000000000..2379ef31a --- /dev/null +++ b/examples/workflows-md/parallel/parallel.md @@ -0,0 +1,12 @@ +--- +type: parallel +name: parallel +fan_out: +- proofreader +- fact_checker +- style_enforcer +fan_in: grader +--- + + You are a parallel processor that executes multiple agents simultaneously + and aggregates their results. diff --git a/examples/workflows-md/parallel/proofreader.md b/examples/workflows-md/parallel/proofreader.md new file mode 100644 index 000000000..88524bddd --- /dev/null +++ b/examples/workflows-md/parallel/proofreader.md @@ -0,0 +1,7 @@ +--- +type: agent +name: proofreader +--- +"Review the short story for grammar, spelling, and punctuation errors. + Identify any awkward phrasing or structural issues that could improve clarity. + Provide detailed feedback on corrections. diff --git a/examples/workflows-md/parallel/style_enforcer.md b/examples/workflows-md/parallel/style_enforcer.md new file mode 100644 index 000000000..3c6b4bcae --- /dev/null +++ b/examples/workflows-md/parallel/style_enforcer.md @@ -0,0 +1,8 @@ +--- +type: agent +name: style_enforcer +model: sonnet +--- +Analyze the story for adherence to style guidelines. + Evaluate the narrative flow, clarity of expression, and tone. Suggest improvements to + enhance storytelling, readability, and engagement. diff --git a/examples/workflows-md/router/code_expert.md b/examples/workflows-md/router/code_expert.md new file mode 100644 index 000000000..07cf304fd --- /dev/null +++ b/examples/workflows-md/router/code_expert.md @@ -0,0 +1,10 @@ +--- +type: agent +name: code_expert +model: haiku +servers: +- filesystem +--- +You are an expert in code analysis and software engineering. + When asked about code, architecture, or development practices, + you provide thorough and practical insights. diff --git a/examples/workflows-md/router/fetcher.md b/examples/workflows-md/router/fetcher.md new file mode 100644 index 000000000..f2d40258e --- /dev/null +++ b/examples/workflows-md/router/fetcher.md @@ -0,0 +1,8 @@ +--- +type: agent +name: fetcher +model: haiku +servers: +- fetch +--- +You are an agent, with a tool enabling you to fetch URLs. 
diff --git a/examples/workflows-md/router/general_assistant.md b/examples/workflows-md/router/general_assistant.md new file mode 100644 index 000000000..c047d3d1f --- /dev/null +++ b/examples/workflows-md/router/general_assistant.md @@ -0,0 +1,7 @@ +--- +type: agent +name: general_assistant +model: haiku +--- +You are a knowledgeable assistant that provides clear, + well-reasoned responses about general topics, concepts, and principles. diff --git a/examples/workflows-md/router/route.md b/examples/workflows-md/router/route.md new file mode 100644 index 000000000..ada0a50fb --- /dev/null +++ b/examples/workflows-md/router/route.md @@ -0,0 +1,18 @@ +--- +type: router +name: route +default: true +model: sonnet +agents: +- code_expert +- general_assistant +- fetcher +--- + +You are a highly accurate request router that directs incoming requests to the most appropriate agent. +Analyze each request and determine which specialized agent would be best suited to handle it based on their capabilities. + +Follow these guidelines: +- Carefully match the request's needs with each agent's capabilities and description +- Select the single most appropriate agent for the request +- Provide your confidence level (high, medium, low) and brief reasoning for your selection diff --git a/examples/workflows-md/short_story.md b/examples/workflows-md/short_story.md new file mode 100644 index 000000000..6d18b7e8b --- /dev/null +++ b/examples/workflows-md/short_story.md @@ -0,0 +1,13 @@ +The Kittens Castle Adventuer + +One sunny day, three lil kittens name Whiskers, Socks, and Mittens was walkin threw a mystirus forrest. They hadnt never seen such a big forrest before! The trees was tall an spooky, an the ground was coverd in moss an stikks. + +Suddenlee, thru the trees, they sawd somthing HUUUUGE! It was a castell, but not just eny castell. This castell was made of sparkling chese an glittery windos. The turrits was so high they tuch the clowds, an the doars was big enuff for a elefant to walk threw! + +"Lookk!" sed Whiskers, his tale all poofy wit exsitement. "We fowned a castell!" Socks meowed loudly an jumped up an down. Mittens, who was the smallist kitten, just stared wit her big rond eyes. + +They climed up the cheesy walls, slip-slidin on the smoth surfase. Inside, they discoverd rooms ful of soft pillows an dangling strings an shiny things that went JINGEL when they tuch them. It was like a kitten paradyse! + +But then, a big shadowy figur apeared... was it the castell gaurd? Or sumthing mor mystirus? The kittens hudeld togethar, there lil hearts beating fast. What wud happan next in there amazeing adventuer? + +THE END?? \ No newline at end of file diff --git a/examples/workflows-md/short_story.txt b/examples/workflows-md/short_story.txt new file mode 100644 index 000000000..f80cb90e8 --- /dev/null +++ b/examples/workflows-md/short_story.txt @@ -0,0 +1,19 @@ +The Battle of Glimmerwood + +In the heart of Glimmerwood, a mystical forest knowed for its radiant trees, a small village thrived. +The villagers, who were live peacefully, shared their home with the forest's magical creatures, +especially the Glimmerfoxes whose fur shimmer like moonlight. + +One fateful evening, the peace was shaterred when the infamous Dark Marauders attack. +Lead by the cunning Captain Thorn, the bandits aim to steal the precious Glimmerstones which was believed to grant immortality. + +Amidst the choas, a young girl named Elara stood her ground, she rallied the villagers and devised a clever plan. 
+Using the forests natural defenses they lured the marauders into a trap. +As the bandits aproached the village square, a herd of Glimmerfoxes emerged, blinding them with their dazzling light, +the villagers seized the opportunity to captured the invaders. + +Elara's bravery was celebrated and she was hailed as the "Guardian of Glimmerwood". +The Glimmerstones were secured in a hidden grove protected by an ancient spell. + +However, not all was as it seemed. The Glimmerstones true power was never confirm, +and whispers of a hidden agenda linger among the villagers. diff --git a/examples/workflows/maker.py b/examples/workflows/maker.py index a20622cd5..cdcedd3c3 100644 --- a/examples/workflows/maker.py +++ b/examples/workflows/maker.py @@ -60,6 +60,7 @@ """ import asyncio +from typing import Any, cast from fast_agent import FastAgent @@ -118,7 +119,7 @@ async def main(): results = [] for text in test_cases: result = await agent.reliable_classifier.send(text) - stats = agent.reliable_classifier.last_result + stats = cast("Any", agent.reliable_classifier).last_result results.append((text, result, stats)) # Display all results together diff --git a/plan/agent-card-rfc-sample.md b/plan/agent-card-rfc-sample.md new file mode 100644 index 000000000..d1b402cae --- /dev/null +++ b/plan/agent-card-rfc-sample.md @@ -0,0 +1,479 @@ +# AgentCard RFC Samples (MD) + +Note: samples mirror README.md code blocks in order. Each block is a full markdown file. Some samples group multiple cards for brevity; in practice, each card lives in its own file (one card per file by default). Non-card snippets (CLI/config/usage) are included as plain markdown file content. + +## Sample 1: Quickstart commands +```md +uv pip install fast-agent-mcp # install fast-agent! +fast-agent go # start an interactive session +fast-agent go --url https://hf.co/mcp # with a remote MCP +fast-agent go --model=generic.qwen2.5 # use ollama qwen 2.5 +fast-agent setup # create an example agent and config files +uv run agent.py # run your first agent +uv run agent.py --model=o3-mini.low # specify a model +uv run agent.py --transport http --port 8001 # expose as MCP server (server mode implied) +fast-agent quickstart workflow # create "building effective agents" examples +``` + +## Sample 2: Basic agent definition +```md +--- +type: agent +name: sizer +instruction: "Given an object, respond only with an estimate of its size." +--- +``` + +## Sample 3: Send a message to the agent +```md +--- +type: agent +name: sizer +instruction: "Given an object, respond only with an estimate of its size." +messages: ./history.md +--- +``` + +## Sample 4: Interactive chat (no preloaded messages) +```md +--- +type: agent +name: sizer +instruction: "Given an object, respond only with an estimate of its size." +--- +``` + +## Sample 5: Full sizer app (single agent file) +```md +--- +type: agent +name: sizer +default: true +instruction: "Given an object, respond only with an estimate of its size." +--- +``` + +## Sample 6: Function tools and hooks +```md +--- +type: agent +name: assistant +function_tools: + - tools.py:add_one +tool_hooks: + - tools.py:audit_hook +instruction: "Use local tools when needed and return concise results." +--- +``` + +## Sample 7: Combining agents and a chain +Card: url_fetcher +```md +--- +type: agent +name: url_fetcher +servers: + - fetch +instruction: "Given a URL, provide a complete and comprehensive summary." 
+--- +``` +Card: social_media +```md +--- +type: agent +name: social_media +--- +Write a 280 character social media post for any given text. +Respond only with the post, never use hashtags. +``` +Card: post_writer +```md +--- +type: chain +name: post_writer +sequence: + - url_fetcher + - social_media +instruction: "Generate a short social media post from a URL summary." +--- +``` + +## Sample 8: Run a chain from the CLI +```md +uv run workflow/chaining.py --agent post_writer --message "" +``` + +## Sample 9: MAKER workflow +Card: classifier +```md +--- +type: agent +name: classifier +instruction: "Reply with only: A, B, or C." +--- +``` +Card: reliable_classifier +```md +--- +type: MAKER +name: reliable_classifier +worker: classifier +k: 3 +max_samples: 25 +match_strategy: normalized +red_flag_max_length: 16 +instruction: "Repeat the worker and return the k-vote winner." +--- +``` + +## Sample 10: Agents as tools (orchestrator-workers) +Card: NY-Project-Manager +```md +--- +type: agent +name: NY-Project-Manager +servers: + - time +tools: + time: [get_time] +instruction: "Return NY time + timezone, plus a one-line project status." +--- +``` +Card: London-Project-Manager +```md +--- +type: agent +name: London-Project-Manager +servers: + - time +tools: + time: [get_time] +instruction: "Return London time + timezone, plus a one-line news update." +--- +``` +Card: PMO-orchestrator +```md +--- +type: agent +name: PMO-orchestrator +default: true +agents: + - NY-Project-Manager + - London-Project-Manager +instruction: "Get reports. Always use one tool call per project/news. Responsibilities: NY projects: [OpenAI, Fast-Agent, Anthropic]. London news: [Economics, Art, Culture]. Aggregate results and add a one-line PMO summary." +--- +``` + +## Sample 11: MCP OAuth minimal config +```md +mcp: + servers: + myserver: + transport: http # or sse + url: http://localhost:8001/mcp # or /sse for SSE servers + auth: + oauth: true # default: true + redirect_port: 3030 # default: 3030 + redirect_path: /callback # default: /callback + # scope: "user" # optional; if omitted, server defaults are used +``` + +## Sample 12: MCP OAuth in-memory tokens +```md +mcp: + servers: + myserver: + transport: http + url: http://localhost:8001/mcp + auth: + oauth: true + persist: memory +``` + +## Sample 13: Chain workflow (minimal) +```md +--- +type: chain +name: post_writer +sequence: + - url_fetcher + - social_media +instruction: "Generate a short social post from a URL summary." +--- +``` + +## Sample 14: Human input agent +```md +--- +type: agent +name: assistant +human_input: true +instruction: "An AI agent that assists with basic tasks. Request Human Input when needed." +--- +``` + +## Sample 15: Parallel workflow + chain +Card: translate_fr +```md +--- +type: agent +name: translate_fr +instruction: "Translate the text to French." +--- +``` +Card: translate_de +```md +--- +type: agent +name: translate_de +instruction: "Translate the text to German." +--- +``` +Card: translate_es +```md +--- +type: agent +name: translate_es +instruction: "Translate the text to Spanish." +--- +``` +Card: translate (parallel) +```md +--- +type: parallel +name: translate +fan_out: + - translate_fr + - translate_de + - translate_es +instruction: "Translate input text to multiple languages and return the combined results." +--- +``` +Card: post_writer (chain) +```md +--- +type: chain +name: post_writer +sequence: + - url_fetcher + - social_media + - translate +instruction: "Generate a post and return translated variants." 
+--- +``` + +## Sample 16: Evaluator-optimizer workflow +```md +--- +type: evaluator_optimizer +name: researcher +generator: web_searcher +evaluator: quality_assurance +min_rating: EXCELLENT +max_refinements: 3 +instruction: "Iterate until the evaluator approves the research output." +--- +``` + +## Sample 17: Router +```md +--- +type: router +name: route +agents: + - agent1 + - agent2 + - agent3 +instruction: "Route requests to the most appropriate agent." +--- +``` + +## Sample 18: Orchestrator +```md +--- +type: orchestrator +name: orchestrate +agents: + - task1 + - task2 + - task3 +instruction: "Plan work across agents and aggregate the results." +--- +``` + +## Sample 19: Calling agents (usage patterns) +Card: default +```md +--- +type: agent +name: default +default: true +instruction: "You are a helpful agent." +--- +``` +Card: greeter +```md +--- +type: agent +name: greeter +instruction: "Respond cheerfully!" +--- +``` +Usage: +```python +moon_size = await agent("the moon") +result = await agent.greeter("Good morning!") +result = await agent.greeter.send("Hello!") +await agent.greeter() +await agent.greeter.prompt() +await agent.greeter.prompt(default_prompt="OK") +agent["greeter"].send("Good Evening!") +``` + +## Sample 20: Basic agent definition (full params) +```md +--- +type: agent +name: agent +servers: + - filesystem +model: o3-mini.high +use_history: true +request_params: + temperature: 0.7 +human_input: true +instruction: "You are a helpful Agent." +--- +``` + +## Sample 21: Chain definition (full params) +```md +--- +type: chain +name: chain +sequence: + - agent1 + - agent2 +cumulative: false +continue_with_final: true +instruction: "instruction" +--- +``` + +## Sample 22: Parallel definition (full params) +```md +--- +type: parallel +name: parallel +fan_out: + - agent1 + - agent2 +fan_in: aggregator +include_request: true +instruction: "instruction" +--- +``` + +## Sample 23: Evaluator-optimizer definition (full params) +```md +--- +type: evaluator_optimizer +name: researcher +generator: web_searcher +evaluator: quality_assurance +min_rating: GOOD +max_refinements: 3 +instruction: "Refine outputs until quality meets the threshold." +--- +``` + +## Sample 24: Router definition (full params) +```md +--- +type: router +name: route +agents: + - agent1 + - agent2 + - agent3 +model: o3-mini.high +use_history: false +human_input: false +instruction: "Route requests based on agent capabilities." 
+--- +``` + +## Sample 25: Orchestrator definition (full params) +```md +--- +type: orchestrator +name: orchestrator +agents: + - agent1 + - agent2 +model: o3-mini.high +use_history: false +human_input: false +plan_type: full +plan_iterations: 5 +instruction: "instruction" +--- +``` + +## Sample 26: MAKER definition (full params) +```md +--- +type: MAKER +name: maker +worker: worker_agent +k: 3 +max_samples: 50 +match_strategy: exact +red_flag_max_length: 256 +instruction: "instruction" +--- +``` + +## Sample 27: Agents as tools (full params) +```md +--- +type: agent +name: orchestrator +agents: + - agent1 + - agent2 +history_mode: fork +max_parallel: 128 +child_timeout_sec: 600 +max_display_instances: 20 +instruction: "instruction" +--- +``` + +## Sample 28: with_resource usage +```python +summary: str = await agent.with_resource( + "Summarise this PDF please", + "mcp_server", + "resource://fast-agent/sample.pdf", +) +``` + +## Sample 29: Sampling config +```md +mcp: + servers: + sampling_resource: + command: "uv" + args: ["run", "sampling_resource_server.py"] + sampling: + model: "haiku" +``` + +## Sample 30: Name defaults to filename (single card) +```md +--- +type: agent +instruction: "Respond cheerfully!" +--- +``` diff --git a/plan/agent-card-rfc.md b/plan/agent-card-rfc.md new file mode 100644 index 000000000..42bf81862 --- /dev/null +++ b/plan/agent-card-rfc.md @@ -0,0 +1,555 @@ +# AgentCard RFC (Draft) + +## Summary +AgentCard is a text-first format (`.md` / `.yaml`) that compiles into `AgentConfig`. +A loader validates fields based on `type` and loads a single file or a directory via +`load_agents(path)`. The default path is **one card per file**. Multi-card files are +optional/experimental and described in a separate spec. +AgentCards now support an optional `description` field used for tool descriptions when +agents are exposed as tools (MCP or agent-as-tool wiring). + +## Agent vs Skill +- **Skill**: a reusable prompt fragment or capability description. +- **AgentCard**: a full runtime configuration (model, servers, tools, history source, + and instruction) that can be instantiated as an agent. +- Formats can be compatible, but the semantics are different. + +## Goals +- One canonical IR: `AgentConfig`. +- Strong validation: reject unknown fields for the given `type`. +- Deterministic parsing and minimal ambiguity. +- Simple authoring: one agent per file by default. + +## Non-goals (for now) +- Cross-file imports/includes (beyond `messages` referencing external history files). +- A rich schema migration framework. + +--- + +## Terminology +- **Card**: one AgentCard definition (`type` + attributes + instruction). +- **Frontmatter**: YAML header delimited by `---` lines in `.md` files. +- **Body**: markdown text following the frontmatter; used for instruction. +- **History file**: a separate file referenced by `messages` that seeds history. + +--- + +## Minimal Attributes +- `type`: one of `agent`, `chain`, `parallel`, `evaluator_optimizer`, `router`, + `orchestrator`, `iterative_planner`, `MAKER` +- `name`: unique card name within a load-set. + - If a file contains a **single** card and `name` is omitted, it defaults to the + filename (no extension). + - Multi-card files are optional/experimental; in that case `name` is required. +- `description`: optional. Used as the tool description when exposing agents as tools. +- `instruction`: required, and can be provided **either** in the body **or** as an + `instruction` attribute (short one-line shortcut). 
If both are present, it is an error. + +## Attribute Sets +- All attributes defined by the decorator for a given `type` are permitted. +- `type` determines the allowed attribute set. +- The loader enforces valid attributes and rejects unknown fields for that `type`. + +## Schema Version +- `schema_version`: optional. + - If present, must be an integer. + - Loader should default to `1` when omitted. + - Parser/loader must remain backwards-compatible within a major series when feasible. + - Loader attaches `schema_version` to the in-memory agent entry for diagnostics/dumps. + +--- + +## Supported File Formats + +### YAML Card (`.yaml` / `.yml`) +A YAML card is a single YAML document whose keys map directly to the `AgentConfig` +schema. `type` is optional and defaults to `agent`. Use `instruction: |` for +multiline prompts. + +Example: +```yaml +type: agent +name: sizer +instruction: | + Given an object, respond only with an estimate of its size. +``` + +### Markdown Card (`.md` / `.markdown`) +A Markdown card is YAML frontmatter followed by an optional body. The body is treated +as the system instruction unless `instruction` is provided in frontmatter. `type` +is optional and defaults to `agent`. +UTF-8 BOM should be tolerated. + +Example: +```md +--- +type: agent +name: sizer +--- +Given an object, respond only with an estimate of its size. +``` + +--- + +## 1:1 Card ↔ Decorator Mapping (Strict Validator) +Use this mapping to validate allowed fields for each `type`. Fields not listed for a +type are invalid. Card-only fields (`schema_version`, `messages`) are listed explicitly. + +Code-only decorator args that are **not** representable in AgentCard: +- `instruction_or_kwarg` (positional instruction) +- `elicitation_handler` (callable) +- `tool_runner_hooks` (hook object) + +### type: `agent` (maps to `@fast.agent`) +Allowed fields: +- `name`, `instruction`, `description`, `default` +- `agents` (agents-as-tools) +- `servers`, `tools`, `resources`, `prompts`, `skills` +- `model`, `use_history`, `request_params`, `human_input`, `api_key` +- `history_source`, `history_merge_target` +- `max_parallel`, `child_timeout_sec`, `max_display_instances` +- `function_tools`, `tool_hooks` (see separate spec) +- `messages` (card-only history file) + +### type: `chain` (maps to `@fast.chain`) +Allowed fields: +- `name`, `instruction`, `description`, `default` +- `sequence`, `cumulative` + +### type: `parallel` (maps to `@fast.parallel`) +Allowed fields: +- `name`, `instruction`, `description`, `default` +- `fan_out`, `fan_in`, `include_request` + +### type: `evaluator_optimizer` (maps to `@fast.evaluator_optimizer`) +Allowed fields: +- `name`, `instruction`, `description`, `default` +- `generator`, `evaluator` +- `min_rating`, `max_refinements`, `refinement_instruction` +- `messages` (card-only history file) + +### type: `router` (maps to `@fast.router`) +Allowed fields: +- `name`, `instruction`, `description`, `default` +- `agents` +- `servers`, `tools`, `resources`, `prompts` +- `model`, `use_history`, `request_params`, `human_input`, `api_key` +- `messages` (card-only history file) + +### type: `orchestrator` (maps to `@fast.orchestrator`) +Allowed fields: +- `name`, `instruction`, `description`, `default` +- `agents` +- `model`, `use_history`, `request_params`, `human_input`, `api_key` +- `plan_type`, `plan_iterations` +- `messages` (card-only history file) + +### type: `iterative_planner` (maps to `@fast.iterative_planner`) +Allowed fields: +- `name`, `instruction`, `description`, `default` +- `agents` 
+- `model`, `request_params`, `api_key` +- `plan_iterations` +- `messages` (card-only history file) + +### type: `MAKER` (maps to `@fast.maker`) +Allowed fields: +- `name`, `instruction`, `description`, `default` +- `worker` +- `k`, `max_samples`, `match_strategy`, `red_flag_max_length` +- `messages` (card-only history file) + +### Card-only fields (all types) +- `schema_version` (optional) + +--- + +## Instruction Source +- **One source only**: either the body **or** the `instruction` attribute. +- If both are present, the loader must raise an error. +- If `instruction` is provided, the body must be empty (whitespace-only allowed). +- The body may start with an optional `---SYSTEM` marker to make the role explicit. + +--- + +## History Preload (`messages`) +History is **external only**. Inline `---USER` / `---ASSISTANT` blocks inside the +AgentCard body are **not supported**. + +### `messages` attribute shape +- `messages: ./history.md` (string) +- `messages: [./history.md, ./fewshot.json]` (list) + +### Path resolution +- Relative paths are resolved relative to the card file directory. + +### History file formats +History files use the same formats as `fast-agent` history save/load: +- **`.json`**: PromptMessageExtended JSON (`{"messages": [...]}`), including tool calls + and other extended fields. This is the format written by `/save_history` when the + filename ends in `.json`. +- **Text/Markdown (`.md`, `.txt`, etc.)**: delimited format with role markers: + - `---USER` + - `---ASSISTANT` + - `---RESOURCE` (followed by JSON for embedded resources) + If a file contains no delimiters, it is treated as a single user message. + +History is its own file type; it is not embedded inside AgentCard files. + +--- + +## Agents-as-Tools History Controls (Proposed) +These options define **where child clones fork history from** and **where merged +history lands**. This addresses the open questions from issue #202 about fork/merge +scope. + +### Fields (AgentCard) +These fields are set on the **orchestrator** (parent) AgentCard because it +controls child invocation and the initial context passed to child agents. + +- `history_source`: `none` | `messages` | `child` | `orchestrator` | `cumulative` *) +- `history_merge_target`: `none` | `messages` **) | `child` | `orchestrator` | `cumulative` *) + +Defaults (change current behavior to a cleaner baseline): +- `history_source`: `none` +- `history_merge_target`: `none` + +Notes: +- `history_source=none`: no forked history is loaded (child starts empty). +- `history_source=messages`: fork base is loaded from the `messages` history file. +- `history_source=child`: fork base is the child template agent’s `message_history`. +- `history_source=orchestrator`: fork base is the parent/orchestrator `message_history`. +- `history_source=cumulative`: fork base is the session-wide merged transcript (not yet implemented). +- `history_merge_target=none`: no merge back occurs. +- `history_merge_target=messages`: merge back into the `messages` history file. +- `history_merge_target=child`: merge back into the child template agent’s `message_history`. +- `history_merge_target=orchestrator`: merge back into the parent/orchestrator `message_history`. +- `history_merge_target=cumulative`: merge back into the session-wide transcript (not yet implemented). + +MVP path 1: +- `/card --tool` starts in **stateless** mode (`history_source=none`, `history_merge_target=none`), + i.e. fresh clone per call with no history load or merge. 
+MVP path 2: +- Advanced history modes should be exercised first in the Agents-as-Tools workflow + before being applied to `/card --tool`. + +*) *Footnote:* there is no cumulative (session-wide merged) history store today; +it would need to be designed and implemented as a separate feature. +**) *Footnote:* writing merged history to a file-based `history_merge_target` +requires a read/write lock and is deferred to a separate implementation. + +### Python API (proposed) +```python +AgentsAsToolsOptions( + history_source="none", + history_merge_target="none", +) +``` + +### CLI flags (proposed) +``` +--child-history-source {none,messages,child,orchestrator,cumulative} +--child-history-merge-target {none,messages,child,orchestrator,cumulative} +``` + +### `/call` or tool invocation (proposed) +If a `/call` command (or MCP tool wrapper) is introduced for ad-hoc child calls, +it should accept the same options as overrides for the current invocation: +``` +/call --history-source orchestrator --history-merge-target orchestrator +``` + +Rationale for open questions: +- There are **two plausible histories**: the child template history (stable per agent) + and the orchestrator history (dynamic per session). Both are valid depending + on whether you want a child to act with its own memory or respond in the + orchestrator’s current context. A third option, **cumulative**, represents a + session-wide merged transcript across agents (if implemented). +- Merge destination is ambiguous: merging back into the child template is useful for + long-lived agent memory; merging into the orchestrator is useful for building a + shared session transcript. + +--- + +## MCP Servers and Tool Filters (YAML) +Match the existing decorator semantics: +- `servers`: list of MCP server names (strings), resolved via `fastagent.config.yaml`. +- `tools`: optional mapping `{server_name: [tool_name_or_pattern, ...]}`. + - If omitted, all tools for that server are allowed. + +Example: +```yaml +servers: + - time + - github + - filesystem +tools: + time: [get_time] + github: [search_*] +``` + +--- + +## Precedence +1) CLI flags (highest priority) +2) AgentCard fields +3) `fastagent.config.yaml` + +This applies to model selection, request params, servers, and other overlapping fields. + +--- + +## Function Tools and Hooks (Separate Spec) +Function tool and hook wiring is evolving and documented separately. +See: [plan/hook-tool-declarative.md](plan/hook-tool-declarative.md) (current branch changes live there). + +--- + +## Examples + +### Basic agent card +```md +--- +type: agent +name: sizer +--- +Given an object, respond only with an estimate of its size. +``` + +### Agent with servers and child agents +```md +--- +type: agent +name: PMO-orchestrator +servers: + - time + - github +agents: + - NY-Project-Manager + - London-Project-Manager +tools: + time: [get_time] + github: [search_*] +--- +Get reports. Always use one tool call per project/news. +Responsibilities: NY projects: [OpenAI, Fast-Agent, Anthropic]. +London news: [Economics, Art, Culture]. +Aggregate results and add a one-line PMO summary. +``` + +### Agent with external history +```md +--- +type: agent +name: analyst +messages: ./history.md +--- +You are a concise analyst. +``` + +--- + +## Loading API +- `load_agents(path)` loads a file or a directory and returns the loaded agent names. +- CLI: `fast-agent go --card ` loads cards before starting. +- `--agent-cards` remains as a legacy alias for `--card`. +- Loading is immediate (no deferred mode). 
+- All loaded agents are tracked with a name and source file path.
+- If a subsequent `load_agents(path)` call does not include a previously loaded agent
+ from that path, the agent is removed.
+- TUI: `/card <path> [--tool]` loads cards at runtime. Autocomplete filters for
+ AgentCard file extensions.
+- ACP slash commands: `/card <path> [--tool]` loads cards at runtime and refreshes
+ modes for the current session.
+
+### Runtime tool injection (optional)
+- `/card --tool` exposes the loaded agent as a tool on the **current** agent.
+- Tool names default to `agent__{name}`.
+- Tool descriptions prefer `description`; fall back to the agent instruction.
+- Default behavior is **stateless**: fresh clone per call with no history load or merge
+ (`history_source=none`, `history_merge_target=none`).
+
+### Example: export AgentCards from a Python workflow
+```bash
+cd examples/workflows
+
+uv run agents_as_tools_extended.py --dump ../workflows-md/agents_as_tools_extended
+```
+
+### Example: run interactive with lazy hot-reload
+```bash
+cd examples/workflows-md
+
+uv run fast-agent go --card agents_as_tools_extended --watch
+```
+
+Manual reload:
+```bash
+cd examples/workflows-md
+
+uv run fast-agent go --card agents_as_tools_extended --reload
+```
+
+One-shot message:
+```bash
+cd examples/workflows-md
+
+uv run fast-agent go --card agents_as_tools_extended --message "go"
+```
+
+### Example: load a directory in Python
+```python
+import asyncio
+
+from fast_agent import FastAgent
+
+fast = FastAgent("workflows-md")
+fast.load_agents("examples/workflows-md/agents_as_tools_extended")
+
+
+async def main() -> None:
+ async with fast.run() as app:
+ await app.interactive()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
+```
+
+## Export / Dump (CLI)
+- Default export format is Markdown (frontmatter + body), matching SKILL.md style.
+- `--dump <dir>` (alias: `--dump-agents`): after loading, export all loaded agents to `<dir>` as
+ Markdown AgentCards (`.md`). Instruction is written to the body.
+- `--dump-yaml <dir>` (alias: `--dump-agents-yaml`): export all loaded agents as YAML AgentCards
+ (`.yaml`) with `instruction` in the YAML field.
+- `--dump-agent <name> --dump-agent-path <file>`: export a single agent as Markdown
+ (default) to a file.
+- `--dump-agent-yaml`: export a single agent as YAML (used with `--dump-agent` and
+ `--dump-agent-path`).
+ - Optional future enhancement: after dumping, print a ready-to-run CLI example
+ for the current directory (e.g. `fast-agent go --card <dir> --watch`).
+
+## Interactive vs One-Shot CLI
+- **Interactive**: `fast-agent go --card <path>` launches the TUI, waits for
+ user input, and keeps session state (history, tools, prompts) in memory.
+- **One-shot**: `fast-agent go --card <path> --message "..."` sends a single
+ request and exits. `--prompt-file` loads a prompt/history file, runs it, then
+ exits (or returns to interactive if explicitly invoked).
+
+## Reload / Watch Behavior (Lazy Hot-Reload)
+Both `--reload` and `--watch` use the same **lazy hot-reload** semantics. The loader
+tracks `registry_version` (monotonic counter) and a per-file cache:
+`path -> (mtime_ns, size, agent_name)`.
+
+On each reload pass, only **changed** files are re-read:
+- If `mtime_ns` or `size` differs, the file is re-parsed and its agents are updated.
+- If a file disappears, its agents are removed from the registry.
+- If a new file appears, its agents are added.
+
+After a reload pass, `registry_version` is bumped if any changes were applied.
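The reload pass can be summarised as a short sketch (illustrative only: `Registry` and `parse_agent_card` are hypothetical names, not the actual loader API; the real loader also handles `.yml`/`.markdown` files, parse errors, and name collisions):

```python
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable

# (mtime_ns, size, agent_name) — the per-file cache entry described above.
CacheEntry = tuple[int, int, str]


@dataclass
class Registry:
    """Hypothetical stand-in for the loader's registry bookkeeping."""
    registry_version: int = 0
    agents: dict[str, dict] = field(default_factory=dict)  # agent name -> parsed card
    file_cache: dict[Path, CacheEntry] = field(default_factory=dict)


def reload_pass(
    registry: Registry,
    card_dir: Path,
    parse_agent_card: Callable[[Path], dict],  # hypothetical parser returning {"name": ..., ...}
) -> bool:
    """Re-read only changed AgentCard files; return True if anything changed."""
    changed = False
    seen: set[Path] = set()

    for path in sorted(card_dir.glob("*.md")) + sorted(card_dir.glob("*.yaml")):
        seen.add(path)
        stat = path.stat()
        cached = registry.file_cache.get(path)
        if cached and cached[0] == stat.st_mtime_ns and cached[1] == stat.st_size:
            continue  # unchanged file: skip re-parsing
        card = parse_agent_card(path)
        registry.agents[card["name"]] = card
        registry.file_cache[path] = (stat.st_mtime_ns, stat.st_size, card["name"])
        changed = True

    # Files that disappeared since the last pass: drop their agents.
    for path in list(registry.file_cache):
        if path not in seen:
            _, _, agent_name = registry.file_cache.pop(path)
            registry.agents.pop(agent_name, None)
            changed = True

    if changed:
        registry.registry_version += 1  # bump only when something actually changed
    return changed
```

Only the mtime/size short-circuit, removal of vanished files, and the version bump are modelled here; instance swapping is described in the next paragraph.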
+Runtime instances compare `instance_version` to the registry. If +`registry_version > instance_version`, a new instance is created on the next +eligible boundary. + +### `--reload` (manual) +- No filesystem watcher. +- Reload is triggered explicitly (e.g. `/reload` in TUI; ACP/tool hooks pending). +- The loader performs an mtime-based incremental reload and updates the registry. + +### `--watch` (automatic) +- OS file events trigger reload passes when `watchfiles` is available. Otherwise, + the watcher falls back to mtime/size polling. +- Only changed files are re-read using the same mtime/size cache. +- No immediate restart; the swap happens lazily on the next request/connection. + +### Instance scope behavior +- `instance_scope=shared`: on the **next request**, if version changed, the shared + instance is recreated once (under lock), then reused for subsequent requests. +- `instance_scope=connection`: version check occurs when a new connection is opened; + existing connections keep their old instance. +- `instance_scope=request`: a new instance is created per request, so the latest + registry is always used. + +### Force reload +- A “force” reload is a full runtime restart (process-level) to guarantee a clean + Python module state. + +## Tools Exposure (fast-agent-mcp) +Expose loader utilities via internal MCP tools: +- `fast-agent-mcp.load_agents(path)` + +--- + +## Appendix: Multi-card Spec (Experimental) +See [plan/agent-card-rfc-multicard.md](plan/agent-card-rfc-multicard.md). + +## Appendix: Current History Preload (Code) +- `save_messages(...)` and `load_messages(...)` in + `src/fast_agent/mcp/prompt_serialization.py` +- Delimiter constants in `src/fast_agent/mcp/prompts/prompt_constants.py` +- `load_history_into_agent(...)` in `src/fast_agent/mcp/prompts/prompt_load.py` +- `/save_history` implementation in `src/fast_agent/llm/fastagent_llm.py` +- CLI `--prompt-file` loader in `src/fast_agent/cli/commands/go.py` + +--- + +## Appendix: AgentCard Samples +See [plan/agent-card-rfc-sample.md](plan/agent-card-rfc-sample.md). + +## Appendix: Code Review Findings +- `/card --tool` can inject the **current agent as a tool** when the loaded card + set includes it. This creates a self-referential tool and can recurse if the + model calls it. Filter out the current agent and dedupe tool names. + (`src/fast_agent/ui/interactive_prompt.py`, `src/fast_agent/acp/slash_commands.py`) +- `add_agent_tool` forwards to the **live child instance** (`child.send`) rather + than a detached clone. This diverges from Agents-as-Tools isolation semantics + and can leak history/usage across parallel calls. + (`src/fast_agent/agents/tool_agent.py`) +- AgentCard `type` currently defaults to `agent` when missing. If strict validation + is expected, this should be an error (otherwise unrelated frontmatter files are + accepted silently). + (`src/fast_agent/core/agent_card_loader.py`) +- Tool injection is **ephemeral**; after `--watch` refresh or reload, injected tools + are lost with no warning. + (`src/fast_agent/ui/interactive_prompt.py`, `src/fast_agent/acp/slash_commands.py`) + +## Appendix: Code Review Fix Plan +General plan: extract the child-tool execution helpers from `AgentsAsToolsAgent` +and reuse them in the `/card --tool` flow so injected agent tools behave the same +as agents-as-tools (detached clones, optional history merge, usage rollup). + +Proposed steps: +1) **Extract shared helpers** from `AgentsAsToolsAgent` into a small module, e.g. 
+ `fast_agent/agents/agent_tool_helpers.py`: + - `serialize_tool_args(args) -> str` + - `spawn_child_clone(child, instance_name, history_source)` + - `invoke_child_tool(clone, args, suppress_display)` + - `merge_child_usage_and_history(child, clone, merge_target)` +2) **Refactor `AgentsAsToolsAgent`** to call these helpers without changing behavior. + This keeps parity with current features (history modes, progress, usage merge). +3) **Update `/card --tool` path** (TUI + ACP): + - Filter out the current agent from `loaded_names` to avoid self-tools. + - Use the shared helpers to create a tool wrapper that spawns detached clones + per call (not the live child instance). + - Deduplicate tools by name and surface a warning if a tool already exists. +4) **Add tests**: + - `/card --tool` does not inject self. + - Injected tools use detached clones (no shared history). + - History merge behavior respects `history_source` and `history_merge_target`. +5) **ACP coverage**: + - Ensure `/card` updates available commands and keeps session modes consistent. + - Validate tool injection works in ACP and TUI with identical behavior. + +## Appendix: Next-stage Work Items +- **Cumulative session history**: no shared, merged transcript exists today; requires + a session-level history store and clear rules for when/what each agent writes. + +## Appendix: History Mode Removal +`history_mode` is removed in this spec and replaced by the orthogonal pair +`history_source` + `history_merge_target`. + +## Appendix: Open Questions / Remaining Work +- Implement stateless `/card --tool` via detached clones (ToolAgent parity). +- Decide whether `/card --subagent` is needed as a distinct primitive from tool injection. +- Define how (or if) advanced history routing is exposed outside agents-as-tools. +- Confirm whether a shared “cumulative” history across `@agent` switches is desired. diff --git a/plan/hook-tool-declarative.md b/plan/hook-tool-declarative.md new file mode 100644 index 000000000..900edfbb6 --- /dev/null +++ b/plan/hook-tool-declarative.md @@ -0,0 +1,240 @@ +# Hook Tool Declarative Spec (Experimental) + +This spec adds a declarative API so any agent can mix MCP servers/tools, local Python +function tools, child agents-as-tools, and tool hooks in one place. The hook design is +nginx-style middleware: it can run before, instead, or after the original tool, and can +mutate args/results or skip execution. + +Status: experimental (intended for maintainer review). + +## Goals + +- Declarative tools + hooks on `@fast.agent` (no custom ToolAgent subclass required). +- Mix in one declaration: `servers`, MCP `tools` filters, `function_tools`, `agents` + (agents-as-tools), and `tool_hooks`. +- Hooks apply uniformly to all tool types (MCP, function, agent, built-ins). +- Hooks can inspect agent/tool identity, mutate args/results, or short-circuit execution. + +## Declarative API (proposed, examples/workflows/agents_as_tools_extended) + +`agents_as_tools_extended.py` +```python +@fast.agent( + name="PMO-orchestrator", + servers=["time"], + tools={"time": ["get_time"]}, # MCP filters + function_tools=[add_one], # local Python tools + agents=["NY-Project-Manager", "London-Project-Manager"], # agents-as-tools + tool_hooks=[audit_hook], # applies to all tools + instruction="Get project updates from the New York and London project managers and include the current time. Ask NY-Project-Manager three times about different projects: Anthropic, evalstate/fast-agent, and OpenAI, and London-Project-Manager for economics review. 
Return a brief, concise combined summary with clear city/time/topic labels.", +) +``` + +### AgentCard example (proposed, examples/workflows-md/agents_as_tools_extended) + +`PMO-orchestrator.md` +```md +--- +type: agent +name: PMO-orchestrator +default: true +servers: + - time +tools: + time: [get_time] +function_tools: + - tools.py:add_one +agents: + - NY-Project-Manager + - London-Project-Manager +tool_hooks: + - hooks.py:audit_hook +history_mode: scratch +max_parallel: 128 +child_timeout_sec: 120 +max_display_instances: 20 +--- +Get project updates from the New York and London project managers and include the current time. Ask NY-Project-Manager three times about different projects: Anthropic, evalstate/fast-agent, and OpenAI, and London-Project-Manager for economics review. Return a brief, concise combined summary with clear city/time/topic labels. +``` + +Notes: +- `tools={...}` remains MCP tool filtering only. +- `function_tools=[...]` accepts callables or `"module.py:function"` strings. +- When loaded from an AgentCard, relative paths are resolved against the card directory. +- AgentCard supports **string specs only**; callables are only valid in Python decorators. +- `tool_hooks=[...]` is a new middleware layer around every tool call. + +## Function tool loading (implemented) + +The current implementation in `src/fast_agent/tools/function_tool_loader.py` loads +function tools as follows: + +- `callable` entries are wrapped via `FastMCPTool.from_function`. +- String specs must be `module.py:function_name` and are loaded dynamically from file. +- Relative module paths are resolved against a `base_path` (AgentCard directory); if + no base path is provided, `cwd` is used. +- Errors raise (invalid format, missing file, missing attribute, non-callable). The loader + logs the failure and re-raises to avoid silent misconfiguration. +- The module name is generated uniquely (`_function_tool__`) to avoid collisions. + +Implication for hooks: hook loaders should mirror this behavior to keep string specs +consistent across function_tools and tool_hooks. Hook loading is based on the same +Function tool loading implementation. 
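As a rough illustration of the string-spec path, resolving a `module.py:function` entry could look like the sketch below (simplified, not the actual `function_tool_loader.py` code; the `_function_tool_` module-name prefix and error messages are illustrative):

```python
import importlib.util
import uuid
from pathlib import Path
from typing import Callable


def load_function_from_spec(spec: str, base_path: Path | None = None) -> Callable:
    """Load a callable from a "module.py:function_name" spec (simplified sketch)."""
    if ":" not in spec:
        raise ValueError(f"Invalid function tool spec '{spec}': expected 'module.py:function_name'")

    module_path_str, func_name = spec.rsplit(":", 1)
    module_path = Path(module_path_str)
    if not module_path.is_absolute():
        # Relative paths resolve against the AgentCard directory, falling back to cwd.
        module_path = (base_path or Path.cwd()) / module_path
    if not module_path.exists():
        raise FileNotFoundError(f"Function tool module not found: {module_path}")

    # Unique synthetic module name to avoid collisions between loaded specs.
    module_name = f"_function_tool_{uuid.uuid4().hex}"
    import_spec = importlib.util.spec_from_file_location(module_name, module_path)
    if import_spec is None or import_spec.loader is None:
        raise ImportError(f"Could not load module from {module_path}")
    module = importlib.util.module_from_spec(import_spec)
    import_spec.loader.exec_module(module)

    func = getattr(module, func_name, None)
    if func is None:
        raise AttributeError(f"'{func_name}' not found in {module_path}")
    if not callable(func):
        raise TypeError(f"'{func_name}' in {module_path} is not callable")
    return func
```

A hook loader built on the same pattern only needs to add its own callability (and, if desired, signature) check, which keeps string specs consistent across `function_tools` and `tool_hooks`.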
+ +## Hook signature and behavior (nginx-style) + +### Core types + +```python +from dataclasses import dataclass +from typing import Any, Awaitable, Callable, Literal +from mcp.types import CallToolResult + +ToolCallArgs = dict[str, Any] | None +ToolCallFn = Callable[[ToolCallArgs], Awaitable[CallToolResult]] + +@dataclass(frozen=True) +class ToolHookContext: + agent_name: str + server_name: str | None + tool_name: str + tool_source: Literal["mcp", "function", "agent", "runtime"] + tool_use_id: str | None + correlation_id: str | None + original_tool_func: ToolCallFn + +ToolHookFn = Callable[[ToolHookContext, ToolCallArgs, ToolCallFn], Awaitable[CallToolResult]] +``` + +### Semantics + +Each hook receives: +- `agent_name`, `server_name`, `tool_name` (identity) +- `original_tool_func` (the underlying tool callable) +- `args` (mutable tool arguments) +- `call_next` (the next hook in chain; last call invokes `original_tool_func`) + +This supports **before / instead / after** behavior: + +```python +async def safety_guard(ctx, args, call_next): + # before: mutate args or enforce limits + args = clamp_args(args) + + # instead: decide not to call the tool + if ctx.tool_name == "shell.execute": + return CallToolResult(isError=True, content=[text_content("blocked")]) + + # call underlying tool (or next hook) + result = await call_next(args) + + # after: mutate or log result + return redact_result(result) +``` + +Multiple hooks compose like middleware; order is the order declared. + +Error handling: +- If a hook raises, the exception bubbles; the tool loop records an error result. + +## Tool identity mapping + +`tool_hooks` must apply uniformly. Proposed mapping: + +- MCP tools: `tool_source="mcp"`, `server_name=`, `tool_name=` +- Local function tools: `tool_source="function"`, `server_name=None` (or "local") +- Agents-as-tools: `tool_source="agent"`, `server_name="agent"`, `tool_name=agent__Child` +- Built-in runtimes (shell, filesystem, human-input, skill reader): + `tool_source="runtime"`, `server_name="runtime"` (or specific runtime name) + +Hooks can branch on `tool_source`, `server_name`, and `tool_name`. + +## Current state (AgentCard branch) + +- AgentCard loader + CLI: `--card` / `--agent-cards`, supports URL cards. +- `function_tools` supported in `@fast.agent`, `@fast.custom`, and AgentCards. +- `function_tool_loader.py` supports callable or `module.py:function` specs. +- `tool_hooks` implementation and tests were removed; hooks are currently missing. + +## Implementation plan (compact) + +1) Data model + parsing +- Add `ToolHookConfig` type alias in `src/fast_agent/agents/agent_types.py`. +- Extend AgentConfig with `tool_hooks: ToolHooksConfig | None`. +- Update `@fast.agent` and `@fast.custom` signatures to accept `tool_hooks`. +- Update `agent_card_loader.py` to parse `tool_hooks` from YAML/MD. + - AgentCard supports **string specs only**; callables are only allowed in decorators. + +2) Loader reuse +- Prefer reusing `function_tool_loader.py` with a generic callable loader. +- Load hook functions from `module.py:function` specs. +- Validate callability; raise with clear error on mismatch. + +3) Runtime wiring +- Restore `ToolHookContext` + `run_tool_with_hooks` (module `agents/tool_hooks.py`). +- Reapply hook execution around tool calls: + - `ToolAgent.call_tool` wraps local function tools. + - `McpAgent.call_tool` wraps MCP tools, runtime tools, and agents-as-tools. +- Preserve `tool_use_id` and `correlation_id` in context. 
+- Keep ToolRunnerHooks unchanged; tool_hooks is independent middleware. + +4) Tests +- Unit: hook chain order, before/instead/after behavior, skip execution. +- Integration: restore declarative hooks test and add AgentCard hook test. + +5) Examples + docs +- Extend `examples/workflows/agents_as_tools_extended.py` with hook usage. +- Update `examples/workflows-md/agents_as_tools_extended` AgentCards to match. +- Review `examples/workflows-md/hf-api-agent` before adding hook samples. +- Update this spec and CLI README. + +## Open question +- Do we want a strict signature check for hooks at load time? + +## Examples (planned) + +### 1) Mixed MCP + function tools + agents + hooks + +```python +def add_one(x: int) -> int: + return x + 1 + + +async def audit_hook(ctx, args, call_next): + # before: enforce limits + if ctx.tool_name.endswith("add_one"): + args = dict(args or {}) + args["x"] = min(int(args.get("x", 0)), 10) + + # instead: block unsafe tools + if ctx.tool_source == "runtime" and ctx.tool_name == "shell.execute": + return CallToolResult(isError=True, content=[text_content("blocked")]) + + # call original tool + result = await call_next(args) + + # after: log or modify result + result.content.append(text_content("[audit]")) + return result + + +@fast.agent( + name="PMO-orchestrator", + instruction="Get project updates from the New York and London project managers and include the current time. Ask NY-Project-Manager three times about different projects: Anthropic, evalstate/fast-agent, and OpenAI, and London-Project-Manager for economics review. Return a brief, concise combined summary with clear city/time/topic labels.", + agents=["NY-Project-Manager", "London-Project-Manager"], + servers=["time"], + tools={"time": ["get_time"]}, + function_tools=[add_one], + tool_hooks=[audit_hook], + default=True, +) +async def main() -> None: + async with fast.run() as agent: + await agent("Run PMO report and add 1 to 3") +``` + +## Related examples (existing) + +- Function tools: `examples/new-api/simple_llm.py` and `examples/tool-use-agent/agent.py` +- Tool runner hooks: `examples/tool-runner-hooks/tool_runner_hooks.py` +- Agents-as-tools: `examples/workflows/agents_as_tools_extended.py` diff --git a/src/fast_agent/acp/server/agent_acp_server.py b/src/fast_agent/acp/server/agent_acp_server.py index 325254608..9fe2ce064 100644 --- a/src/fast_agent/acp/server/agent_acp_server.py +++ b/src/fast_agent/acp/server/agent_acp_server.py @@ -200,6 +200,8 @@ def __init__( server_version: str | None = None, skills_directory_override: Sequence[str | Path] | str | Path | None = None, permissions_enabled: bool = True, + get_registry_version: Callable[[], int] | None = None, + load_card_callback: Callable[[str], Awaitable[list[str]]] | None = None, ) -> None: """ Initialize the ACP server. 
@@ -213,6 +215,7 @@ def __init__( server_version: Version of the server (defaults to fast-agent version) skills_directory_override: Optional skills directory override (relative to session cwd) permissions_enabled: Whether to request tool permissions from client (default: True) + load_card_callback: Optional callback to load AgentCards at runtime """ super().__init__() @@ -220,6 +223,11 @@ def __init__( self._create_instance_task = create_instance self._dispose_instance_task = dispose_instance self._instance_scope = instance_scope + self._get_registry_version = get_registry_version + self._load_card_callback = load_card_callback + self._primary_registry_version = getattr(primary_instance, "registry_version", 0) + self._shared_reload_lock = asyncio.Lock() + self._stale_instances: list[AgentInstance] = [] self.server_name = server_name self._skills_directory_override = skills_directory_override self._permissions_enabled = permissions_enabled @@ -531,6 +539,172 @@ def _build_session_request_params( return None return RequestParams(systemPrompt=resolved) + async def _maybe_refresh_shared_instance(self) -> None: + if self._instance_scope != "shared" or not self._get_registry_version: + return + if self._active_prompts: + return + + latest_version = self._get_registry_version() + if latest_version <= self._primary_registry_version: + return + + async with self._shared_reload_lock: + if self._active_prompts: + return + latest_version = self._get_registry_version() + if latest_version <= self._primary_registry_version: + return + + new_instance = await self._create_instance_task() + old_instance = self.primary_instance + self.primary_instance = new_instance + self._primary_registry_version = getattr( + new_instance, "registry_version", latest_version + ) + self._stale_instances.append(old_instance) + self.primary_agent_name = self._select_primary_agent(new_instance) + await self._refresh_sessions_for_instance(new_instance) + + async def _refresh_sessions_for_instance(self, instance: AgentInstance) -> None: + async with self._session_lock: + for session_id, session_state in self._session_state.items(): + self.sessions[session_id] = instance + session_state.instance = instance + self._refresh_session_state(session_state, instance) + + def _refresh_session_state( + self, session_state: ACPSessionState, instance: AgentInstance + ) -> None: + prompt_context = session_state.prompt_context or {} + resolved_for_session: dict[str, str] = {} + for agent_name, agent in instance.agents.items(): + template = getattr(agent, "instruction", None) + if not template: + continue + resolved = apply_template_variables(template, prompt_context) + if resolved: + resolved_for_session[agent_name] = resolved + session_state.resolved_instructions = resolved_for_session + + for agent_name, agent in instance.agents.items(): + if isinstance(agent, InstructionContextCapable): + try: + agent.set_instruction_context(prompt_context) + except Exception as exc: + logger.warning( + "Failed to set instruction context on agent", + name="acp_instruction_context_failed", + session_id=session_state.session_id, + agent_name=agent_name, + error=str(exc), + ) + + if session_state.terminal_runtime: + for agent_name, agent in instance.agents.items(): + if ( + isinstance(agent, ShellRuntimeCapable) + and agent._shell_runtime_enabled + ): + agent.set_external_runtime(session_state.terminal_runtime) + + if session_state.filesystem_runtime: + for agent_name, agent in instance.agents.items(): + if isinstance(agent, FilesystemRuntimeCapable): + 
agent.set_filesystem_runtime(session_state.filesystem_runtime) + + async def load_card(source: str) -> tuple[AgentInstance, list[str]]: + return await self._load_agent_card_for_session(session_state, source) + + slash_handler = SlashCommandHandler( + session_state.session_id, + instance, + self.primary_agent_name or "default", + client_info=self._client_info, + client_capabilities=self._client_capabilities, + protocol_version=self._protocol_version, + session_instructions=resolved_for_session, + card_loader=load_card if self._load_card_callback else None, + ) + session_state.slash_handler = slash_handler + + current_agent = session_state.current_agent_name + if not current_agent or current_agent not in instance.agents: + current_agent = self.primary_agent_name or next(iter(instance.agents.keys()), None) + session_state.current_agent_name = current_agent + if current_agent and session_state.slash_handler: + session_state.slash_handler.set_current_agent(current_agent) + + session_modes = self._build_session_modes(instance, session_state) + if current_agent and current_agent in instance.agents: + session_modes = SessionModeState( + available_modes=session_modes.available_modes, + current_mode_id=current_agent, + ) + + if session_state.acp_context: + session_state.acp_context.set_slash_handler(slash_handler) + session_state.acp_context.set_resolved_instructions(resolved_for_session) + session_state.acp_context.set_available_modes(session_modes.available_modes) + if current_agent: + session_state.acp_context.set_current_mode(current_agent) + + async def _load_agent_card_for_session( + self, session_state: ACPSessionState, source: str + ) -> tuple[AgentInstance, list[str]]: + if not self._load_card_callback: + raise RuntimeError("AgentCard loading is not available.") + + loaded_names = await self._load_card_callback(source) + + if self._instance_scope == "shared": + async with self._shared_reload_lock: + new_instance = await self._create_instance_task() + old_instance = self.primary_instance + self.primary_instance = new_instance + latest_version = ( + self._get_registry_version() if self._get_registry_version else None + ) + self._primary_registry_version = getattr( + new_instance, "registry_version", latest_version + ) + self._stale_instances.append(old_instance) + self.primary_agent_name = self._select_primary_agent(new_instance) + await self._refresh_sessions_for_instance(new_instance) + instance = session_state.instance + else: + instance = await self._create_instance_task() + old_instance = session_state.instance + session_state.instance = instance + async with self._session_lock: + self.sessions[session_state.session_id] = instance + self._refresh_session_state(session_state, instance) + if old_instance != self.primary_instance: + try: + await self._dispose_instance_task(old_instance) + except Exception as exc: + logger.warning( + "Failed to dispose old session instance", + name="acp_card_dispose_error", + session_id=session_state.session_id, + error=str(exc), + ) + + if session_state.acp_context: + await session_state.acp_context.send_available_commands_update() + + return instance, loaded_names + + async def _dispose_stale_instances_if_idle(self) -> None: + if self._active_prompts: + return + if not self._stale_instances: + return + stale = list(self._stale_instances) + self._stale_instances.clear() + for instance in stale: + await self._dispose_instance_task(instance) + def _build_status_line_meta( self, agent: Any, turn_start_index: int | None ) -> dict[str, Any] | None: @@ -574,6 
+748,8 @@ async def new_session( mcp_server_count=len(mcp_servers), ) + await self._maybe_refresh_shared_instance() + async with self._session_lock: # Determine which instance to use based on scope if self._instance_scope == "shared": @@ -786,6 +962,9 @@ async def new_session( # Create slash command handler for this session resolved_prompts = session_state.resolved_instructions + async def load_card(source: str) -> tuple[AgentInstance, list[str]]: + return await self._load_agent_card_for_session(session_state, source) + slash_handler = SlashCommandHandler( session_id, instance, @@ -794,6 +973,7 @@ async def new_session( client_capabilities=self._client_capabilities, protocol_version=self._protocol_version, session_instructions=resolved_prompts, + card_loader=load_card if self._load_card_callback else None, ) session_state.slash_handler = slash_handler @@ -1001,6 +1181,8 @@ async def prompt( session_id=session_id, ) + await self._maybe_refresh_shared_instance() + # Check for overlapping prompt requests (per ACP protocol requirement) async with self._session_lock: if session_id in self._active_prompts: @@ -1337,6 +1519,7 @@ def on_stream_chunk(chunk: StreamChunk): name="acp_prompt_complete", session_id=session_id, ) + await self._dispose_stale_instances_if_idle() async def cancel(self, session_id: str, **kwargs: Any) -> None: """ @@ -1525,6 +1708,17 @@ async def _cleanup_sessions(self) -> None: name="acp_cleanup_error", ) + if self._stale_instances: + for instance in list(self._stale_instances): + try: + await self._dispose_instance_task(instance) + except Exception as e: + logger.error( + f"Error disposing stale instance: {e}", + name="acp_cleanup_error", + ) + self._stale_instances.clear() + self.sessions.clear() logger.info("ACP cleanup complete") diff --git a/src/fast_agent/acp/slash_commands.py b/src/fast_agent/acp/slash_commands.py index 34c7ea7d2..0bc009072 100644 --- a/src/fast_agent/acp/slash_commands.py +++ b/src/fast_agent/acp/slash_commands.py @@ -11,12 +11,13 @@ from __future__ import annotations +import shlex import textwrap import time import uuid from importlib.metadata import version as get_version from pathlib import Path -from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Protocol, runtime_checkable from acp.helpers import text_block, tool_content from acp.schema import ( @@ -116,6 +117,7 @@ def __init__( client_capabilities: dict | None = None, protocol_version: int | None = None, session_instructions: dict[str, str] | None = None, + card_loader: Callable[[str], Awaitable[tuple["AgentInstance", list[str]]]] | None = None, ): """ Initialize the slash command handler. 
@@ -145,6 +147,7 @@ def __init__( self.client_capabilities = client_capabilities self.protocol_version = protocol_version self._session_instructions = session_instructions or {} + self._card_loader = card_loader # Session-level commands (always available, operate on current agent) self._session_commands: dict[str, AvailableCommand] = { @@ -182,6 +185,13 @@ def __init__( description="Load conversation history from file", input=AvailableCommandInput(root=UnstructuredCommandInput(hint="")), ), + "card": AvailableCommand( + name="card", + description="Load an AgentCard from file or URL", + input=AvailableCommandInput( + root=UnstructuredCommandInput(hint=" [--tool]") + ), + ), } def get_available_commands(self) -> list[AvailableCommand]: @@ -335,6 +345,8 @@ async def execute_command(self, command_name: str, arguments: str) -> str: return await self._handle_clear(arguments) if command_name == "load": return await self._handle_load(arguments) + if command_name == "card": + return await self._handle_card(arguments) # Check agent-specific commands agent = self._get_current_agent() @@ -1254,6 +1266,71 @@ async def _handle_load(self, arguments: str | None = None) -> str: ] ) + async def _handle_card(self, arguments: str | None = None) -> str: + """Handle the /card command by loading an AgentCard and refreshing agents.""" + if not self._card_loader: + return "AgentCard loading is not available in this session." + + args = (arguments or "").strip() + if not args: + return "Filename required for /card command.\nUsage: /card [--tool]" + + try: + tokens = shlex.split(args) + except ValueError as exc: + return f"Invalid arguments: {exc}" + + add_tool = False + filename = None + for token in tokens: + if token in {"tool", "--tool", "--as-tool", "-t"}: + add_tool = True + continue + if filename is None: + filename = token + + if not filename: + return "Filename required for /card command.\nUsage: /card [--tool]" + + try: + instance, loaded_names = await self._card_loader(filename) + except Exception as exc: + return f"AgentCard load failed: {exc}" + + self.instance = instance + + if not loaded_names: + summary = "AgentCard loaded." + else: + summary = "Loaded AgentCard(s): " + ", ".join(loaded_names) + + if not add_tool: + return summary + + parent_name = self.current_agent_name + if not parent_name or parent_name not in instance.agents: + parent_name = next(iter(instance.agents.keys()), None) + self.current_agent_name = parent_name or self.current_agent_name + if not parent_name: + return summary + + parent = instance.agents.get(parent_name) + add_tool_fn = getattr(parent, "add_agent_tool", None) + if not callable(add_tool_fn): + return f"{summary}\nCurrent agent does not support tool injection." + + added_tools: list[str] = [] + for child_name in loaded_names: + child = instance.agents.get(child_name) + if child is None: + continue + tool_name = add_tool_fn(child) + added_tools.append(tool_name) + + if not added_tools: + return summary + return f"{summary}\nAdded tool(s): {', '.join(added_tools)}" + async def _handle_clear(self, arguments: str | None = None) -> str: """Handle /clear and /clear last commands.""" normalized = (arguments or "").strip().lower() diff --git a/src/fast_agent/agents/agent_types.py b/src/fast_agent/agents/agent_types.py index f4a0d214a..581fd8106 100644 --- a/src/fast_agent/agents/agent_types.py +++ b/src/fast_agent/agents/agent_types.py @@ -2,10 +2,11 @@ Type definitions for agents and agent configurations. 
""" +from collections.abc import Callable from dataclasses import dataclass, field from enum import StrEnum, auto from pathlib import Path -from typing import TypeAlias +from typing import Any, TypeAlias from mcp.client.session import ElicitationFnT @@ -41,6 +42,13 @@ class AgentType(StrEnum): | SkillsDefault ) +# Function tools can be: +# - A callable (Python function) +# - A string spec like "module.py:function_name" (for dynamic loading) +FunctionToolConfig: TypeAlias = Callable[..., Any] | str + +FunctionToolsConfig: TypeAlias = list[FunctionToolConfig] | None + @dataclass class AgentConfig: @@ -48,6 +56,7 @@ class AgentConfig: name: str instruction: str = DEFAULT_AGENT_INSTRUCTION + description: str | None = None servers: list[str] = field(default_factory=list) tools: dict[str, list[str]] = field(default_factory=dict) # filters for tools resources: dict[str, list[str]] = field(default_factory=dict) # filters for resources @@ -62,6 +71,7 @@ class AgentConfig: default: bool = False elicitation_handler: ElicitationFnT | None = None api_key: str | None = None + function_tools: FunctionToolsConfig = None def __post_init__(self): """Ensure default_request_params exists with proper history setting""" diff --git a/src/fast_agent/agents/llm_agent.py b/src/fast_agent/agents/llm_agent.py index 8bbac38ef..3e0cd07aa 100644 --- a/src/fast_agent/agents/llm_agent.py +++ b/src/fast_agent/agents/llm_agent.py @@ -90,6 +90,7 @@ async def show_assistant_message( name: str | None = None, model: str | None = None, additional_message: Optional[Text] = None, + render_markdown: bool | None = None, ) -> None: """Display an assistant message with appropriate styling based on stop reason. @@ -101,6 +102,7 @@ async def show_assistant_message( name: Optional agent name to display model: Optional model name to display additional_message: Optional additional message to display + render_markdown: Force markdown rendering (True) or plain rendering (False) """ # Determine display content based on stop reason if not provided @@ -227,6 +229,7 @@ async def show_assistant_message( name=display_name, model=display_model, additional_message=additional_message_text, + render_markdown=render_markdown, ) def show_user_message(self, message: PromptMessageExtended) -> None: @@ -295,6 +298,8 @@ async def generate_impl( llm = self._require_llm() display_name = self.name display_model = llm.model_name + _, streaming_mode = self.display.resolve_streaming_preferences() + render_markdown = True if streaming_mode == "markdown" else False remove_listener: Callable[[], None] | None = None remove_tool_listener: Callable[[], None] | None = None @@ -327,7 +332,11 @@ async def generate_impl( stream_handle.finalize(result) - await self.show_assistant_message(result, additional_message=summary_text) + await self.show_assistant_message( + result, + additional_message=summary_text, + render_markdown=render_markdown, + ) else: result, summary = await self._generate_with_summary( messages, request_params, tools diff --git a/src/fast_agent/agents/llm_decorator.py b/src/fast_agent/agents/llm_decorator.py index 360ee6da6..b442d5627 100644 --- a/src/fast_agent/agents/llm_decorator.py +++ b/src/fast_agent/agents/llm_decorator.py @@ -1130,7 +1130,7 @@ async def agent_card(self) -> AgentCard: return AgentCard( skills=[], name=self._name, - description=self.instruction, + description=self.config.description or self.instruction, url=f"fast-agent://agents/{self._name}/", version="0.1", capabilities=DEFAULT_CAPABILITIES, @@ -1153,5 +1153,6 @@ async def 
show_assistant_message( name: str | None = None, model: str | None = None, additional_message: Union["Text", None] = None, + render_markdown: bool | None = None, ) -> None: pass diff --git a/src/fast_agent/agents/mcp_agent.py b/src/fast_agent/agents/mcp_agent.py index f3459c57d..ef4a62614 100644 --- a/src/fast_agent/agents/mcp_agent.py +++ b/src/fast_agent/agents/mcp_agent.py @@ -1290,7 +1290,7 @@ async def agent_card(self) -> AgentCard: return AgentCard( skills=skills, name=self._name, - description=self.instruction, + description=self.config.description or self.instruction, url=f"fast-agent://agents/{self._name}/", version="0.1", capabilities=DEFAULT_CAPABILITIES, @@ -1309,6 +1309,7 @@ async def show_assistant_message( name: str | None = None, model: str | None = None, additional_message: Union["Text", None] = None, + render_markdown: bool | None = None, ) -> None: """ Display an assistant message with MCP servers in the bottom bar. @@ -1350,6 +1351,7 @@ async def show_assistant_message( name=name, model=model, additional_message=additional_message, + render_markdown=render_markdown, ) def _extract_servers_from_message(self, message: PromptMessageExtended) -> list[str]: diff --git a/src/fast_agent/agents/tool_agent.py b/src/fast_agent/agents/tool_agent.py index ed81a3be9..e7e6131fa 100644 --- a/src/fast_agent/agents/tool_agent.py +++ b/src/fast_agent/agents/tool_agent.py @@ -1,4 +1,6 @@ +import json as json_module import time +from contextvars import ContextVar from typing import Any, Callable, Dict, List, Sequence from mcp.server.fastmcp.tools.base import Tool as FastMCPTool @@ -14,14 +16,22 @@ ) from fast_agent.context import Context from fast_agent.core.logging.logger import get_logger +from fast_agent.core.prompt import Prompt +from fast_agent.event_progress import ProgressAction from fast_agent.interfaces import ToolRunnerHookCapable from fast_agent.mcp.helpers.content_helpers import text_content +from fast_agent.mcp.tool_execution_handler import ToolExecutionHandler from fast_agent.tools.elicitation import get_elicitation_fastmcp_tool from fast_agent.types import PromptMessageExtended, RequestParams, ToolTimingInfo from fast_agent.utils.async_utils import gather_with_cancel logger = get_logger(__name__) +_tool_progress_context: ContextVar[tuple[ToolExecutionHandler, str] | None] = ContextVar( + "tool_progress_context", + default=None, +) + class ToolAgent(LlmAgent, _ToolLoopAgent): """ @@ -42,6 +52,7 @@ def __init__( self._execution_tools: dict[str, FastMCPTool] = {} self._tool_schemas: list[Tool] = [] + self.tool_runner_hooks: ToolRunnerHooks | None = None # Build a working list of tools and auto-inject human-input tool if missing working_tools: list[FastMCPTool | Callable] = list(tools) if tools else [] @@ -76,6 +87,132 @@ def __init__( ) ) + def _clone_constructor_kwargs(self) -> dict[str, Any]: + """Carry local tool definitions into detached clones.""" + if not self._execution_tools: + return {} + return {"tools": list(self._execution_tools.values())} + + def add_tool(self, tool: FastMCPTool, *, replace: bool = True) -> None: + """Register a new execution tool and expose it to the LLM.""" + name = tool.name + if not replace and name in self._execution_tools: + raise ValueError(f"Tool '{name}' already exists") + + self._execution_tools[name] = tool + self._tool_schemas = [schema for schema in self._tool_schemas if schema.name != name] + self._tool_schemas.append( + Tool( + name=tool.name, + description=tool.description, + inputSchema=tool.parameters, + ) + ) + + def 
add_agent_tool( + self, + child: LlmAgent, + *, + name: str | None = None, + description: str | None = None, + ) -> str: + """Expose another agent as a tool on this agent.""" + tool_name = name or f"agent__{child.name}" + if not description: + config = getattr(child, "config", None) + description = getattr(config, "description", None) or getattr( + child, "instruction", None + ) + tool_description = description or f"Send a message to the {child.name} agent" + + async def call_agent(text: str | None = None, json: dict | None = None) -> str: + if text is not None: + input_text = text + elif json is not None: + input_text = json_module.dumps(json, ensure_ascii=False) + else: + input_text = "" + clone = await child.spawn_detached_instance(name=f"{child.name}[tool]") + progress_step = 0 + + async def emit_progress(label: str | None = None) -> None: + nonlocal progress_step + progress_step += 1 + message = f"{child.name} step {progress_step}" + if label: + message = f"{message} ({label})" + + ctx = _tool_progress_context.get() + if ctx: + handler, tool_call_id = ctx + try: + await handler.on_tool_progress( + tool_call_id, float(progress_step), None, message + ) + except Exception: + pass + + logger.info( + "Agent tool progress", + data={ + "progress_action": ProgressAction.TOOL_PROGRESS, + "agent_name": self.name, + "progress": progress_step, + "total": None, + "details": message, + }, + ) + + hooks_set = False + if isinstance(clone, ToolAgent): + existing_hooks = getattr(clone, "tool_runner_hooks", None) + before_llm_call = existing_hooks.before_llm_call if existing_hooks else None + before_tool_call = existing_hooks.before_tool_call if existing_hooks else None + after_llm_call = existing_hooks.after_llm_call if existing_hooks else None + after_tool_call = existing_hooks.after_tool_call if existing_hooks else None + + async def handle_before_llm_call(runner, messages): + if before_llm_call: + await before_llm_call(runner, messages) + await emit_progress("llm") + + async def handle_before_tool_call(runner, message): + if before_tool_call: + await before_tool_call(runner, message) + await emit_progress("tool") + + clone.tool_runner_hooks = ToolRunnerHooks( + before_llm_call=handle_before_llm_call, + after_llm_call=after_llm_call, + before_tool_call=handle_before_tool_call, + after_tool_call=after_tool_call, + ) + hooks_set = True + + try: + if not hooks_set: + await emit_progress("run") + clone.load_message_history([]) + response = await clone.generate([Prompt.user(input_text)], None) + return response.last_text() or "" + finally: + try: + await clone.shutdown() + except Exception as exc: + logger.warning(f"Error shutting down tool clone for {child.name}: {exc}") + try: + child.merge_usage_from(clone) + except Exception as exc: + logger.warning(f"Failed to merge tool clone usage for {child.name}: {exc}") + + fast_tool = FastMCPTool.from_function( + call_agent, + name=tool_name, + description=tool_description, + ) + self.add_tool(fast_tool) + return tool_name + async def generate_impl( self, messages: List[PromptMessageExtended], @@ -302,15 +439,49 @@ async def call_tool(self, name: str, arguments: Dict[str, Any] | None = None) -> isError=True, ) + tool_handler = self._get_tool_handler() + tool_call_id = None + if tool_handler: + try: + tool_call_id = await tool_handler.on_tool_start(name, "local", arguments, None) + except Exception: + tool_call_id = None + + token = None + if tool_handler and tool_call_id: + token = _tool_progress_context.set((tool_handler, tool_call_id)) + try: result = await 
fast_tool.run(arguments or {}, convert_result=False) - return CallToolResult( + tool_result = CallToolResult( content=[text_content(str(result))], isError=False, ) + if tool_handler and tool_call_id: + try: + await tool_handler.on_tool_complete( + tool_call_id, True, tool_result.content, None + ) + except Exception: + pass + return tool_result except Exception as e: logger.error(f"Tool {name} failed: {e}") - return CallToolResult( + tool_result = CallToolResult( content=[text_content(f"Error: {str(e)}")], isError=True, ) + if tool_handler and tool_call_id: + try: + await tool_handler.on_tool_complete(tool_call_id, False, None, str(e)) + except Exception: + pass + return tool_result + finally: + if token is not None: + _tool_progress_context.reset(token) + + def _get_tool_handler(self) -> ToolExecutionHandler | None: + context = getattr(self, "_context", None) + acp = getattr(context, "acp", None) if context else None + return getattr(acp, "progress_manager", None) diff --git a/src/fast_agent/agents/workflow/agents_as_tools_agent.py b/src/fast_agent/agents/workflow/agents_as_tools_agent.py index 414901c97..fdce8e3eb 100644 --- a/src/fast_agent/agents/workflow/agents_as_tools_agent.py +++ b/src/fast_agent/agents/workflow/agents_as_tools_agent.py @@ -340,6 +340,13 @@ async def list_tools(self) -> ListToolsResult: if tool_name in existing_names: continue + description = None + config = getattr(agent, "config", None) + if config is not None: + description = getattr(config, "description", None) + if not description: + description = agent.instruction + input_schema: dict[str, Any] = { "type": "object", "properties": { @@ -351,7 +358,7 @@ async def list_tools(self) -> ListToolsResult: tools.append( Tool( name=tool_name, - description=agent.instruction, + description=description, inputSchema=input_schema, ) ) diff --git a/src/fast_agent/cli/commands/README.md b/src/fast_agent/cli/commands/README.md index 8f2060123..469e8fc57 100644 --- a/src/fast_agent/cli/commands/README.md +++ b/src/fast_agent/cli/commands/README.md @@ -83,6 +83,7 @@ fast-agent serve [OPTIONS] - `--instruction`, `-i TEXT`: Instruction for the agent (defaults to the standard FastAgent instruction) - `--config-path`, `-c TEXT`: Path to config file - `--servers TEXT`: Comma-separated list of server names to enable from config +- `--card`, `--agent-cards TEXT`: Path or URL to an AgentCard file or directory (repeatable) - `--url TEXT`: Comma-separated list of HTTP/SSE URLs to connect to - `--auth TEXT`: Bearer token for authorization with URL-based servers - `--model TEXT`: Override the default model (e.g., haiku, sonnet, gpt-4) @@ -119,6 +120,9 @@ fast-agent serve --url=https://api.example.com/mcp --npx "@modelcontextprotocol/ # Custom tool description (the {agent} placeholder is replaced with the agent name) fast-agent serve --description "Interact with the {agent} workflow via MCP" +# Load AgentCards from a file or directory +fast-agent serve --card ./agents --transport=http + # Use per-connection instances to isolate history between clients fast-agent serve --instance-scope=connection --transport=http ``` diff --git a/src/fast_agent/cli/commands/acp.py b/src/fast_agent/cli/commands/acp.py index 1fed5f63f..2719b4140 100644 --- a/src/fast_agent/cli/commands/acp.py +++ b/src/fast_agent/cli/commands/acp.py @@ -39,6 +39,12 @@ def run_acp( servers: str | None = typer.Option( None, "--servers", help="Comma-separated list of server names to enable from config" ), + agent_cards: list[str] | None = typer.Option( + None, + "--agent-cards", + 
"--card", + help="Path or URL to an AgentCard file or directory (repeatable)", + ), urls: str | None = typer.Option( None, "--url", help="Comma-separated list of HTTP/SSE URLs to connect to" ), @@ -112,6 +118,7 @@ def run_acp( instruction=resolved_instruction, config_path=config_path, servers=servers, + agent_cards=agent_cards, urls=urls, auth=auth, model=model, diff --git a/src/fast_agent/cli/commands/go.py b/src/fast_agent/cli/commands/go.py index 6e4c2c889..bc3e501de 100644 --- a/src/fast_agent/cli/commands/go.py +++ b/src/fast_agent/cli/commands/go.py @@ -12,6 +12,7 @@ from fast_agent.cli.commands.server_helpers import add_servers_to_config, generate_server_name from fast_agent.cli.commands.url_parser import generate_server_configs, parse_server_urls from fast_agent.constants import DEFAULT_AGENT_INSTRUCTION +from fast_agent.core.exceptions import AgentConfigError from fast_agent.utils.async_utils import configure_uvloop, create_event_loop, ensure_event_loop app = typer.Typer( @@ -107,6 +108,7 @@ async def _run_agent( instruction: str = default_instruction, config_path: str | None = None, server_list: list[str] | None = None, + agent_cards: list[str] | None = None, model: str | None = None, message: str | None = None, prompt_file: str | None = None, @@ -122,6 +124,8 @@ async def _run_agent( tool_description: str | None = None, instance_scope: str = "shared", permissions_enabled: bool = True, + reload: bool = False, + watch: bool = False, ) -> None: """Async implementation to run an interactive agent.""" from fast_agent import FastAgent @@ -143,6 +147,8 @@ async def _run_agent( # Set model on args so model source detection works correctly if model: fast.args.model = model + fast.args.reload = reload + fast.args.watch = watch if shell_runtime: await fast.app.initialize() @@ -154,8 +160,32 @@ async def _run_agent( if stdio_servers: await add_servers_to_config(fast, cast("dict[str, dict[str, Any]]", stdio_servers)) + if agent_cards: + try: + for card_source in agent_cards: + if card_source.startswith(("http://", "https://")): + fast.load_agents_from_url(card_source) + else: + fast.load_agents(card_source) + except AgentConfigError as exc: + fast._handle_error(exc) + raise typer.Exit(1) from exc + + async def cli_agent(): + async with fast.run() as agent: + if message: + response = await agent.send(message) + print(response) + elif prompt_file: + prompt = load_prompt(Path(prompt_file)) + agent_obj = agent._agent(None) + await agent_obj.generate(prompt) + print(f"\nLoaded {len(prompt)} messages from prompt file '{prompt_file}'") + await agent.interactive() + else: + await agent.interactive() # Check if we have multiple models (comma-delimited) - if model and "," in model: + elif model and "," in model: # Parse multiple models models = [m.strip() for m in model.split(",") if m.strip()] @@ -258,6 +288,7 @@ def run_async_agent( servers: str | None = None, urls: str | None = None, auth: str | None = None, + agent_cards: list[str] | None = None, model: str | None = None, message: str | None = None, prompt_file: str | None = None, @@ -272,6 +303,8 @@ def run_async_agent( tool_description: str | None = None, instance_scope: str = "shared", permissions_enabled: bool = True, + reload: bool = False, + watch: bool = False, ): """Run the async agent function with proper loop handling.""" configure_uvloop() @@ -362,6 +395,7 @@ def run_async_agent( instruction=instruction, config_path=config_path, server_list=server_list, + agent_cards=agent_cards, model=model, message=message, prompt_file=prompt_file, @@ 
-377,6 +411,8 @@ def run_async_agent( tool_description=tool_description, instance_scope=instance_scope, permissions_enabled=permissions_enabled, + reload=reload, + watch=watch, ) ) finally: @@ -406,6 +442,12 @@ def go( servers: str | None = typer.Option( None, "--servers", help="Comma-separated list of server names to enable from config" ), + agent_cards: list[str] | None = typer.Option( + None, + "--agent-cards", + "--card", + help="Path or URL to an AgentCard file or directory (repeatable)", + ), urls: str | None = typer.Option( None, "--url", help="Comma-separated list of HTTP/SSE URLs to connect to" ), @@ -442,6 +484,8 @@ def go( "-x", help="Enable a local shell runtime and expose the execute tool (bash or pwsh).", ), + reload: bool = typer.Option(False, "--reload", help="Enable manual AgentCard reloads (/reload)"), + watch: bool = typer.Option(False, "--watch", help="Watch AgentCard paths and reload"), ) -> None: """ Run an interactive agent directly from the command line. @@ -451,6 +495,7 @@ def go( fast-agent go --instruction=https://raw.githubusercontent.com/user/repo/prompt.md fast-agent go --message="What is the weather today?" --model=haiku fast-agent go --prompt-file=my-prompt.txt --model=haiku + fast-agent go --agent-cards ./agents --watch fast-agent go --url=http://localhost:8001/mcp,http://api.example.com/sse fast-agent go --url=https://api.example.com/mcp --auth=YOUR_API_TOKEN fast-agent go --npx "@modelcontextprotocol/server-filesystem /path/to/data" @@ -471,11 +516,14 @@ def go( --auth Bearer token for authorization with URL-based servers --message, -m Send a single message and exit --prompt-file, -p Use a prompt file instead of interactive mode + --agent-cards Load AgentCards from a file or directory --skills Override the default skills folder --shell, -x Enable local shell runtime --npx NPX package and args to run as MCP server (quoted) --uvx UVX package and args to run as MCP server (quoted) --stdio Command to run as STDIO MCP server (quoted) + --reload Enable manual AgentCard reloads (/reload) + --watch Watch AgentCard paths and reload """ # Collect all stdio commands from convenience options stdio_commands = collect_stdio_commands(npx, uvx, stdio) @@ -491,6 +539,7 @@ def go( instruction=resolved_instruction, config_path=config_path, servers=servers, + agent_cards=agent_cards, urls=urls, auth=auth, model=model, @@ -501,4 +550,6 @@ def go( skills_directory=skills_dir, shell_enabled=shell_enabled, instance_scope="shared", + reload=reload, + watch=watch, ) diff --git a/src/fast_agent/cli/commands/serve.py b/src/fast_agent/cli/commands/serve.py index d2deccf75..d3d42eee3 100644 --- a/src/fast_agent/cli/commands/serve.py +++ b/src/fast_agent/cli/commands/serve.py @@ -42,6 +42,12 @@ def serve( servers: str | None = typer.Option( None, "--servers", help="Comma-separated list of server names to enable from config" ), + agent_cards: list[str] | None = typer.Option( + None, + "--agent-cards", + "--card", + help="Path or URL to an AgentCard file or directory (repeatable)", + ), urls: str | None = typer.Option( None, "--url", help="Comma-separated list of HTTP/SSE URLs to connect to" ), @@ -113,6 +119,7 @@ def serve( fast-agent serve --stdio "python my_server.py --debug" fast-agent serve --npx "@modelcontextprotocol/server-filesystem /path/to/data" fast-agent serve --description "Interact with the {agent} assistant" + fast-agent serve --agent-cards ./agents --transport=http --port=8000 """ stdio_commands = collect_stdio_commands(npx, uvx, stdio) shell_enabled = shell @@ -124,6 
+131,7 @@ def serve( instruction=resolved_instruction, config_path=config_path, servers=servers, + agent_cards=agent_cards, urls=urls, auth=auth, model=model, diff --git a/src/fast_agent/cli/constants.py b/src/fast_agent/cli/constants.py index 301afc34d..b42048f92 100644 --- a/src/fast_agent/cli/constants.py +++ b/src/fast_agent/cli/constants.py @@ -23,6 +23,10 @@ "-x", "--skills", "--skills-dir", + "--agent-cards", + "--card", + "--watch", + "--reload", } # Known subcommands that should not trigger auto-routing diff --git a/src/fast_agent/core/agent_app.py b/src/fast_agent/core/agent_app.py index 6f5ecdedf..9193c78b8 100644 --- a/src/fast_agent/core/agent_app.py +++ b/src/fast_agent/core/agent_app.py @@ -2,7 +2,7 @@ Direct AgentApp implementation for interacting with agents without proxies. """ -from typing import Mapping, Union +from typing import Awaitable, Callable, Mapping, Union from deprecated import deprecated from mcp.types import GetPromptResult, PromptMessage @@ -30,16 +30,28 @@ class AgentApp: calls to the default agent (the first agent in the container). """ - def __init__(self, agents: dict[str, AgentProtocol]) -> None: + def __init__( + self, + agents: dict[str, AgentProtocol], + *, + reload_callback: Callable[[], Awaitable[bool]] | None = None, + refresh_callback: Callable[[], Awaitable[bool]] | None = None, + load_card_callback: Callable[[str], Awaitable[list[str]]] | None = None, + ) -> None: """ Initialize the DirectAgentApp. Args: agents: Dictionary of agent instances keyed by name + reload_callback: Optional callback for manual AgentCard reloads + refresh_callback: Optional callback for lazy instance refresh before requests """ if len(agents) == 0: raise ValueError("No agents provided!") self._agents = agents + self._reload_callback = reload_callback + self._refresh_callback = refresh_callback + self._load_card_callback = load_card_callback def __getitem__(self, key: str) -> AgentProtocol: """Allow access to agents using dictionary syntax.""" @@ -77,6 +89,7 @@ async def __call__( The agent's response as a string or the result of the interactive session """ if message: + await self._refresh_if_needed() return await self._agent(agent_name).send(message, request_params) return await self.interactive( @@ -103,6 +116,7 @@ async def send( Returns: The agent's response as a string """ + await self._refresh_if_needed() return await self._agent(agent_name).send(message, request_params) def _agent(self, agent_name: str | None) -> AgentProtocol: @@ -136,6 +150,7 @@ async def apply_prompt( Returns: The agent's response as a string """ + await self._refresh_if_needed() return await self._agent(agent_name).apply_prompt( prompt, arguments, as_template=as_template ) @@ -151,6 +166,7 @@ async def list_prompts(self, namespace: str | None = None, agent_name: str | Non Returns: Dictionary mapping server names to lists of available prompts """ + await self._refresh_if_needed() if not agent_name: results = {} for agent in self._agents.values(): @@ -178,6 +194,7 @@ async def get_prompt( Returns: GetPromptResult containing the prompt information """ + await self._refresh_if_needed() return await self._agent(agent_name).get_prompt( prompt_name=prompt_name, arguments=arguments, namespace=server_name ) @@ -201,6 +218,7 @@ async def with_resource( Returns: The agent's response as a string """ + await self._refresh_if_needed() return await self._agent(agent_name).with_resource( prompt_content=prompt_content, resource_uri=resource_uri, namespace=server_name ) @@ -220,6 +238,7 @@ async def 
list_resources( Returns: Dictionary mapping server names to lists of resource URIs """ + await self._refresh_if_needed() return await self._agent(agent_name).list_resources(namespace=server_name) async def get_resource( @@ -239,10 +258,63 @@ async def get_resource( Returns: ReadResourceResult object containing the resource content """ + await self._refresh_if_needed() return await self._agent(agent_name).get_resource( resource_uri=resource_uri, namespace=server_name ) + async def reload_agents(self) -> bool: + """Reload AgentCards and refresh active instances when available.""" + if not self._reload_callback: + return False + return await self._reload_callback() + + def can_reload_agents(self) -> bool: + """Return True if manual reload is available.""" + return self._reload_callback is not None + + def can_load_agent_cards(self) -> bool: + """Return True if agent card loading is available.""" + return self._load_card_callback is not None + + async def load_agent_card(self, source: str) -> list[str]: + """Load an AgentCard source and refresh active instances when available.""" + if not self._load_card_callback: + raise RuntimeError("Agent card loading is not available.") + return await self._load_card_callback(source) + + def set_agents(self, agents: dict[str, AgentProtocol]) -> None: + """Replace the active agent map (used after reload).""" + if not agents: + raise ValueError("No agents provided!") + self._agents = agents + + def set_reload_callback(self, callback: Callable[[], Awaitable[bool]] | None) -> None: + """Update the reload callback for manual AgentCard refresh.""" + self._reload_callback = callback + + def set_refresh_callback(self, callback: Callable[[], Awaitable[bool]] | None) -> None: + """Update the refresh callback for lazy instance swaps.""" + self._refresh_callback = callback + + def set_load_card_callback( + self, callback: Callable[[str], Awaitable[list[str]]] | None + ) -> None: + """Update the callback for loading agent cards at runtime.""" + self._load_card_callback = callback + + def agent_names(self) -> list[str]: + """Return available agent names.""" + return list(self._agents.keys()) + + def agent_types(self) -> dict[str, AgentType]: + """Return mapping of agent names to agent types.""" + return {name: agent.agent_type for name, agent in self._agents.items()} + + async def _refresh_if_needed(self) -> None: + if self._refresh_callback: + await self._refresh_callback() + @deprecated async def prompt( self, diff --git a/src/fast_agent/core/agent_card_loader.py b/src/fast_agent/core/agent_card_loader.py new file mode 100644 index 000000000..b52d94806 --- /dev/null +++ b/src/fast_agent/core/agent_card_loader.py @@ -0,0 +1,871 @@ +"""AgentCard loader and export helpers for Markdown/YAML card files.""" + +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Iterable + +import frontmatter +import yaml + +from fast_agent.agents.agent_types import AgentConfig, AgentType +from fast_agent.core.direct_decorators import _resolve_instruction +from fast_agent.core.exceptions import AgentConfigError +from fast_agent.types import RequestParams + +_TYPE_MAP: dict[str, AgentType] = { + "agent": AgentType.BASIC, + "chain": AgentType.CHAIN, + "parallel": AgentType.PARALLEL, + "evaluator_optimizer": AgentType.EVALUATOR_OPTIMIZER, + "router": AgentType.ROUTER, + "orchestrator": AgentType.ORCHESTRATOR, + "iterative_planner": AgentType.ITERATIVE_PLANNER, + "maker": AgentType.MAKER, +} + +_COMMON_FIELDS = {"type", 
"name", "instruction", "description", "default", "schema_version"} + +_ALLOWED_FIELDS_BY_TYPE: dict[str, set[str]] = { + "agent": { + *_COMMON_FIELDS, + "agents", + "servers", + "tools", + "resources", + "prompts", + "skills", + "model", + "use_history", + "request_params", + "human_input", + "api_key", + "history_mode", + "max_parallel", + "child_timeout_sec", + "max_display_instances", + "function_tools", + "tool_hooks", + "messages", + }, + "chain": { + *_COMMON_FIELDS, + "sequence", + "cumulative", + }, + "parallel": { + *_COMMON_FIELDS, + "fan_out", + "fan_in", + "include_request", + }, + "evaluator_optimizer": { + *_COMMON_FIELDS, + "generator", + "evaluator", + "min_rating", + "max_refinements", + "refinement_instruction", + "messages", + }, + "router": { + *_COMMON_FIELDS, + "agents", + "servers", + "tools", + "resources", + "prompts", + "model", + "use_history", + "request_params", + "human_input", + "api_key", + "messages", + }, + "orchestrator": { + *_COMMON_FIELDS, + "agents", + "model", + "use_history", + "request_params", + "human_input", + "api_key", + "plan_type", + "plan_iterations", + "messages", + }, + "iterative_planner": { + *_COMMON_FIELDS, + "agents", + "model", + "request_params", + "api_key", + "plan_iterations", + "messages", + }, + "MAKER": { + *_COMMON_FIELDS, + "worker", + "k", + "max_samples", + "match_strategy", + "red_flag_max_length", + "messages", + }, +} + +_REQUIRED_FIELDS_BY_TYPE: dict[str, set[str]] = { + "agent": set(), + "chain": {"sequence"}, + "parallel": {"fan_out"}, + "evaluator_optimizer": {"generator", "evaluator"}, + "router": {"agents"}, + "orchestrator": {"agents"}, + "iterative_planner": {"agents"}, + "MAKER": {"worker"}, +} + +_HISTORY_DELIMITERS = {"---USER", "---ASSISTANT", "---RESOURCE"} + +_AGENT_TYPE_TO_CARD_TYPE: dict[str, str] = { + AgentType.BASIC.value: "agent", + AgentType.CHAIN.value: "chain", + AgentType.PARALLEL.value: "parallel", + AgentType.EVALUATOR_OPTIMIZER.value: "evaluator_optimizer", + AgentType.ROUTER.value: "router", + AgentType.ORCHESTRATOR.value: "orchestrator", + AgentType.ITERATIVE_PLANNER.value: "iterative_planner", + AgentType.MAKER.value: "MAKER", +} + +_DEFAULT_USE_HISTORY_BY_TYPE: dict[str, bool] = { + "agent": True, + "chain": True, + "parallel": True, + "evaluator_optimizer": True, + "router": False, + "orchestrator": False, + "iterative_planner": False, + "MAKER": True, +} + + +@dataclass(frozen=True) +class LoadedAgentCard: + name: str + path: Path + agent_data: dict[str, Any] + message_files: list[Path] + + +def load_agent_cards(path: Path) -> list[LoadedAgentCard]: + path = path.expanduser().resolve() + if not path.exists(): + raise AgentConfigError(f"AgentCard path not found: {path}") + + if path.is_dir(): + cards: list[LoadedAgentCard] = [] + for entry in sorted(path.iterdir()): + if entry.is_dir(): + continue + if entry.suffix.lower() not in {".md", ".markdown", ".yaml", ".yml"}: + continue + cards.extend(_load_agent_card_file(entry)) + _ensure_unique_names(cards, path) + return cards + + if path.suffix.lower() not in {".md", ".markdown", ".yaml", ".yml"}: + raise AgentConfigError(f"Unsupported AgentCard file extension: {path}") + + cards = _load_agent_card_file(path) + _ensure_unique_names(cards, path) + return cards + + +def _ensure_unique_names(cards: Iterable[LoadedAgentCard], path: Path) -> None: + seen: dict[str, Path] = {} + for card in cards: + if card.name in seen: + raise AgentConfigError( + f"Duplicate agent name '{card.name}' in {path}", + f"Conflicts: {seen[card.name]} and {card.path}", 
+ ) + seen[card.name] = card.path + + +def _load_agent_card_file(path: Path) -> list[LoadedAgentCard]: + suffix = path.suffix.lower() + if suffix in {".yaml", ".yml"}: + raw = _load_yaml_card(path) + return [_build_card_from_data(path, raw, body=None)] + if suffix in {".md", ".markdown"}: + metadata, body = _load_markdown_card(path) + return [_build_card_from_data(path, metadata, body=body)] + raise AgentConfigError(f"Unsupported AgentCard file: {path}") + + +def _load_yaml_card(path: Path) -> dict[str, Any]: + try: + data = yaml.safe_load(path.read_text(encoding="utf-8")) + except yaml.YAMLError as exc: + raise AgentConfigError(f"Failed to parse YAML in {path}", str(exc)) from exc + + if not isinstance(data, dict): + raise AgentConfigError(f"AgentCard YAML must be a mapping in {path}") + return data + + +def _load_markdown_card(path: Path) -> tuple[dict[str, Any], str]: + try: + raw_text = path.read_text(encoding="utf-8") + if raw_text.startswith("\ufeff"): + raw_text = raw_text.lstrip("\ufeff") + post = frontmatter.loads(raw_text) + except Exception as exc: # noqa: BLE001 + raise AgentConfigError(f"Failed to parse frontmatter in {path}", str(exc)) from exc + + metadata = post.metadata or {} + if not isinstance(metadata, dict): + raise AgentConfigError(f"Frontmatter must be a mapping in {path}") + + body = post.content or "" + return dict(metadata), body + + +def _build_card_from_data( + path: Path, + raw: dict[str, Any], + *, + body: str | None, +) -> LoadedAgentCard: + raw = dict(raw) + card_type_raw = raw.get("type") + if card_type_raw is None: + card_type_key = "agent" + elif isinstance(card_type_raw, str): + card_type_key = card_type_raw.strip() or "agent" + else: + raise AgentConfigError(f"'type' must be a string in {path}") + card_type_key_norm = card_type_key.lower() + if card_type_key_norm == "maker": + type_key = "MAKER" + else: + type_key = card_type_key_norm + + if type_key not in _ALLOWED_FIELDS_BY_TYPE: + raise AgentConfigError(f"Unsupported agent type '{card_type_raw}' in {path}") + + allowed_fields = _ALLOWED_FIELDS_BY_TYPE[type_key] + unknown_fields = set(raw.keys()) - allowed_fields + if unknown_fields: + unknown_list = ", ".join(sorted(unknown_fields)) + raise AgentConfigError( + f"Unsupported fields for type '{type_key}' in {path}", + f"Unknown fields: {unknown_list}", + ) + + schema_version = raw.get("schema_version", 1) + if not isinstance(schema_version, int): + raise AgentConfigError(f"'schema_version' must be an integer in {path}") + + name = _resolve_name(raw.get("name"), path) + instruction = _resolve_instruction_field(raw.get("instruction"), body, path) + description = _ensure_optional_str(raw.get("description"), "description", path) + + required_fields = _REQUIRED_FIELDS_BY_TYPE[type_key] + missing = [field for field in required_fields if field not in raw or raw[field] is None] + if missing: + missing_list = ", ".join(missing) + raise AgentConfigError( + f"Missing required fields for type '{type_key}' in {path}", + f"Required: {missing_list}", + ) + + message_files = _resolve_message_files(raw.get("messages"), path, type_key) + + agent_type = _TYPE_MAP[type_key.lower()] if type_key != "MAKER" else AgentType.MAKER + agent_data = _build_agent_data( + agent_type=agent_type, + type_key=type_key, + name=name, + instruction=instruction, + description=description, + raw=raw, + path=path, + ) + agent_data["schema_version"] = schema_version + + return LoadedAgentCard( + name=name, + path=path, + agent_data=agent_data, + message_files=message_files, + ) + + +def 
_resolve_name(raw_name: Any, path: Path) -> str: + if raw_name is None: + return path.stem + if not isinstance(raw_name, str) or not raw_name.strip(): + raise AgentConfigError(f"'name' must be a non-empty string in {path}") + return raw_name.strip() + + +def _resolve_instruction_field( + raw_instruction: Any, + body: str | None, + path: Path, +) -> str: + body_instruction = "" + if body is not None: + body_instruction = _extract_body_instruction(body, path) + + if raw_instruction is not None and body_instruction: + raise AgentConfigError( + "Instruction cannot be provided in both body and 'instruction' field", + f"Path: {path}", + ) + + if raw_instruction is not None: + if not isinstance(raw_instruction, str): + raise AgentConfigError(f"'instruction' must be a string in {path}") + resolved = _resolve_instruction(raw_instruction.strip()) + if not resolved.strip(): + raise AgentConfigError(f"'instruction' must not be empty in {path}") + return resolved + + if body_instruction: + resolved = _resolve_instruction(body_instruction) + if not resolved.strip(): + raise AgentConfigError(f"Instruction body must not be empty in {path}") + return resolved + + raise AgentConfigError(f"Instruction is required in {path}") + + +def _extract_body_instruction(body: str, path: Path) -> str: + if not body: + return "" + lines = body.splitlines() + first_non_empty = None + for idx, line in enumerate(lines): + if line.strip(): + first_non_empty = idx + break + if first_non_empty is None: + return "" + + if lines[first_non_empty].strip() == "---SYSTEM": + lines = lines[first_non_empty + 1 :] + else: + lines = lines[first_non_empty:] + + if any(line.strip() in _HISTORY_DELIMITERS for line in lines): + raise AgentConfigError( + "Inline history blocks are not supported inside AgentCard body", + f"Path: {path}", + ) + + return "\n".join(lines).strip() + + +def _resolve_message_files(raw_messages: Any, path: Path, type_key: str) -> list[Path]: + if raw_messages is None: + return [] + if not isinstance(raw_messages, (str, list)): + raise AgentConfigError(f"'messages' must be a string or list in {path}") + if isinstance(raw_messages, str): + entries = [raw_messages] + else: + entries = raw_messages + if not entries: + return [] + + message_paths: list[Path] = [] + for entry in entries: + if not isinstance(entry, str) or not entry.strip(): + raise AgentConfigError(f"'messages' entries must be strings in {path}") + candidate = Path(entry).expanduser() + if not candidate.is_absolute(): + candidate = (path.parent / candidate).resolve() + if not candidate.exists(): + raise AgentConfigError( + f"History file not found for AgentCard '{type_key}' in {path}", + f"Missing: {candidate}", + ) + message_paths.append(candidate) + return message_paths + + +def _build_agent_data( + *, + agent_type: AgentType, + type_key: str, + name: str, + instruction: str, + description: str | None, + raw: dict[str, Any], + path: Path, +) -> dict[str, Any]: + servers = _ensure_str_list(raw.get("servers", []), "servers", path) + tools = _ensure_filter_map(raw.get("tools", {}), "tools", path) + resources = _ensure_filter_map(raw.get("resources", {}), "resources", path) + prompts = _ensure_filter_map(raw.get("prompts", {}), "prompts", path) + + model = raw.get("model") + use_history = _default_use_history(type_key, raw.get("use_history")) + request_params = _ensure_request_params(raw.get("request_params"), path) + human_input = _ensure_bool(raw.get("human_input"), "human_input", path, default=False) + default = _ensure_bool(raw.get("default"), 
"default", path, default=False) + api_key = raw.get("api_key") + + # Parse function_tools - can be a string or list of strings + function_tools_raw = raw.get("function_tools") + function_tools = None + if function_tools_raw is not None: + if isinstance(function_tools_raw, str): + function_tools = [function_tools_raw] + elif isinstance(function_tools_raw, list): + function_tools = [str(t) for t in function_tools_raw] + + config = AgentConfig( + name=name, + instruction=instruction, + description=description, + servers=servers, + tools=tools, + resources=resources, + prompts=prompts, + skills=raw.get("skills"), + model=model, + use_history=use_history, + human_input=human_input, + default=default, + api_key=api_key, + function_tools=function_tools, + ) + + if request_params is not None: + config.default_request_params = request_params + config.default_request_params.systemPrompt = config.instruction + config.default_request_params.use_history = config.use_history + + agent_data: dict[str, Any] = { + "config": config, + "type": agent_type.value, + "func": None, + "source_path": str(path), + } + + if type_key == "agent": + agents = _ensure_str_list(raw.get("agents", []), "agents", path) + if agents: + agent_data["child_agents"] = agents + opts = _agents_as_tools_options(raw, path) + if opts: + agent_data["agents_as_tools_options"] = opts + if "function_tools" in raw: + agent_data["function_tools"] = raw.get("function_tools") + if "tool_hooks" in raw: + agent_data["tool_hooks"] = raw.get("tool_hooks") + elif type_key == "chain": + sequence = _ensure_str_list(raw.get("sequence", []), "sequence", path) + if not sequence: + raise AgentConfigError(f"'sequence' must include at least one agent in {path}") + agent_data["sequence"] = sequence + agent_data["cumulative"] = _ensure_bool(raw.get("cumulative"), "cumulative", path) + elif type_key == "parallel": + fan_out = _ensure_str_list(raw.get("fan_out", []), "fan_out", path) + if not fan_out: + raise AgentConfigError(f"'fan_out' must include at least one agent in {path}") + agent_data["fan_out"] = fan_out + fan_in = raw.get("fan_in") + if fan_in is not None and not isinstance(fan_in, str): + raise AgentConfigError(f"'fan_in' must be a string in {path}") + agent_data["fan_in"] = fan_in + agent_data["include_request"] = _ensure_bool( + raw.get("include_request"), "include_request", path, default=True + ) + elif type_key == "evaluator_optimizer": + agent_data["generator"] = _ensure_str(raw.get("generator"), "generator", path) + agent_data["evaluator"] = _ensure_str(raw.get("evaluator"), "evaluator", path) + agent_data["min_rating"] = _ensure_str(raw.get("min_rating", "GOOD"), "min_rating", path) + agent_data["max_refinements"] = _ensure_int( + raw.get("max_refinements", 3), "max_refinements", path + ) + agent_data["refinement_instruction"] = raw.get("refinement_instruction") + elif type_key == "router": + router_agents = _ensure_str_list(raw.get("agents", []), "agents", path) + if not router_agents: + raise AgentConfigError(f"'agents' must include at least one agent in {path}") + agent_data["router_agents"] = router_agents + agent_data["instruction"] = instruction + elif type_key in {"orchestrator", "iterative_planner"}: + child_agents = _ensure_str_list(raw.get("agents", []), "agents", path) + if not child_agents: + raise AgentConfigError(f"'agents' must include at least one agent in {path}") + agent_data["child_agents"] = child_agents + if type_key == "orchestrator": + agent_data["plan_type"] = _ensure_str(raw.get("plan_type", "full"), "plan_type", 
path) + agent_data["plan_iterations"] = _ensure_int( + raw.get("plan_iterations", 5), "plan_iterations", path + ) + else: + agent_data["plan_iterations"] = _ensure_int( + raw.get("plan_iterations", -1), "plan_iterations", path + ) + elif type_key == "MAKER": + agent_data["worker"] = _ensure_str(raw.get("worker"), "worker", path) + agent_data["k"] = _ensure_int(raw.get("k", 3), "k", path) + agent_data["max_samples"] = _ensure_int(raw.get("max_samples", 50), "max_samples", path) + agent_data["match_strategy"] = _ensure_str( + raw.get("match_strategy", "exact"), "match_strategy", path + ) + red_flag = raw.get("red_flag_max_length") + if red_flag is not None: + red_flag = _ensure_int(red_flag, "red_flag_max_length", path) + agent_data["red_flag_max_length"] = red_flag + + return agent_data + + +def _default_use_history(type_key: str, raw_value: Any) -> bool: + if isinstance(raw_value, bool): + return raw_value + if type_key in {"router", "orchestrator", "iterative_planner"}: + return False + return True + + +def _ensure_bool(value: Any, field: str, path: Path, default: bool = False) -> bool: + if value is None: + return default + if isinstance(value, bool): + return value + raise AgentConfigError(f"'{field}' must be a boolean in {path}") + + +def _ensure_str(value: Any, field: str, path: Path) -> str: + if not isinstance(value, str) or not value.strip(): + raise AgentConfigError(f"'{field}' must be a non-empty string in {path}") + return value + + +def _ensure_optional_str(value: Any, field: str, path: Path) -> str | None: + if value is None: + return None + if not isinstance(value, str) or not value.strip(): + raise AgentConfigError(f"'{field}' must be a non-empty string in {path}") + return value.strip() + + +def _ensure_str_list(value: Any, field: str, path: Path) -> list[str]: + if value is None: + return [] + if not isinstance(value, list): + raise AgentConfigError(f"'{field}' must be a list of strings in {path}") + result: list[str] = [] + for entry in value: + if not isinstance(entry, str) or not entry.strip(): + raise AgentConfigError(f"'{field}' entries must be strings in {path}") + result.append(entry) + return result + + +def _ensure_filter_map(value: Any, field: str, path: Path) -> dict[str, list[str]]: + if value is None: + return {} + if not isinstance(value, dict): + raise AgentConfigError(f"'{field}' must be a mapping in {path}") + result: dict[str, list[str]] = {} + for key, entry in value.items(): + if not isinstance(key, str) or not key.strip(): + raise AgentConfigError(f"'{field}' keys must be strings in {path}") + if not isinstance(entry, list): + raise AgentConfigError(f"'{field}' values must be lists in {path}") + for item in entry: + if not isinstance(item, str) or not item.strip(): + raise AgentConfigError(f"'{field}' values must be strings in {path}") + result[key] = entry + return result + + +def _ensure_request_params(value: Any, path: Path) -> RequestParams | None: + if value is None: + return None + if isinstance(value, RequestParams): + return value + if not isinstance(value, dict): + raise AgentConfigError(f"'request_params' must be a mapping in {path}") + try: + return RequestParams(**value) + except Exception as exc: # noqa: BLE001 + raise AgentConfigError(f"Invalid request_params in {path}", str(exc)) from exc + + +def _agents_as_tools_options(raw: dict[str, Any], path: Path) -> dict[str, Any]: + options: dict[str, Any] = {} + history_mode = raw.get("history_mode") + max_parallel = raw.get("max_parallel") + child_timeout_sec = raw.get("child_timeout_sec") + 
max_display_instances = raw.get("max_display_instances") + + if history_mode is not None: + options["history_mode"] = history_mode + if max_parallel is not None: + options["max_parallel"] = _ensure_int(max_parallel, "max_parallel", path) + if child_timeout_sec is not None: + options["child_timeout_sec"] = _ensure_float( + child_timeout_sec, "child_timeout_sec", path + ) + if max_display_instances is not None: + options["max_display_instances"] = _ensure_int( + max_display_instances, "max_display_instances", path + ) + return options + + +def _ensure_int(value: Any, field: str, path: Path) -> int: + if not isinstance(value, int): + raise AgentConfigError(f"'{field}' must be an integer in {path}") + return value + + +def _ensure_float(value: Any, field: str, path: Path) -> float: + if isinstance(value, (int, float)): + return float(value) + raise AgentConfigError(f"'{field}' must be a number in {path}") + + +def dump_agents_to_dir( + agents: dict[str, dict[str, Any]], + output_dir: Path, + *, + as_yaml: bool = False, + message_map: dict[str, list[Path]] | None = None, +) -> None: + output_dir = output_dir.expanduser().resolve() + output_dir.mkdir(parents=True, exist_ok=True) + + for name in sorted(agents.keys()): + output_path = output_dir / f"{name}.{'yaml' if as_yaml else 'md'}" + message_paths = message_map.get(name) if message_map else None + dump_agent_to_path( + name, + agents[name], + output_path, + as_yaml=as_yaml, + message_paths=message_paths, + ) + + +def dump_agent_to_path( + name: str, + agent_data: dict[str, Any], + output_path: Path, + *, + as_yaml: bool = False, + message_paths: list[Path] | None = None, +) -> None: + card_dict, instruction = _build_card_dump(name, agent_data, message_paths) + output_path = output_path.expanduser().resolve() + output_path.parent.mkdir(parents=True, exist_ok=True) + + if as_yaml: + card_dict = dict(card_dict) + card_dict["instruction"] = instruction + payload = yaml.safe_dump( + card_dict, + sort_keys=False, + allow_unicode=False, + ).rstrip() + output_path.write_text(f"{payload}\n", encoding="utf-8") + return + + frontmatter = yaml.safe_dump( + card_dict, + sort_keys=False, + allow_unicode=False, + ).rstrip() + output_path.write_text( + f"---\n{frontmatter}\n---\n{instruction.rstrip()}\n", + encoding="utf-8", + ) + + +def _build_card_dump( + name: str, + agent_data: dict[str, Any], + message_paths: list[Path] | None, +) -> tuple[dict[str, Any], str]: + agent_type_value = agent_data.get("type") + if not isinstance(agent_type_value, str): + raise AgentConfigError(f"Agent '{name}' is missing a valid type") + card_type = _AGENT_TYPE_TO_CARD_TYPE.get(agent_type_value) + if card_type is None: + raise AgentConfigError(f"Agent '{name}' has unsupported type '{agent_type_value}'") + + config = agent_data.get("config") + if not isinstance(config, AgentConfig): + raise AgentConfigError(f"Agent '{name}' is missing AgentConfig") + + instruction = config.instruction + if not instruction: + raise AgentConfigError(f"Agent '{name}' is missing instruction") + + card: dict[str, Any] = {"type": card_type, "name": name} + schema_version = agent_data.get("schema_version") + if isinstance(schema_version, int): + card["schema_version"] = schema_version + + allowed_fields = _ALLOWED_FIELDS_BY_TYPE.get(card_type, set()) + + if config.default and "default" in allowed_fields: + card["default"] = True + + if config.description and "description" in allowed_fields: + card["description"] = config.description + + if config.model and "model" in allowed_fields: + card["model"] 
= config.model + + if "use_history" in allowed_fields: + default_use_history = _DEFAULT_USE_HISTORY_BY_TYPE.get(card_type, True) + if config.use_history != default_use_history: + card["use_history"] = config.use_history + + if config.human_input and "human_input" in allowed_fields: + card["human_input"] = True + + if config.api_key and "api_key" in allowed_fields: + card["api_key"] = config.api_key + + if config.servers and "servers" in allowed_fields: + card["servers"] = list(config.servers) + + if config.tools and "tools" in allowed_fields: + card["tools"] = config.tools + + if config.resources and "resources" in allowed_fields: + card["resources"] = config.resources + + if config.prompts and "prompts" in allowed_fields: + card["prompts"] = config.prompts + + serialized_skills = _serialize_skills(config.skills) + if serialized_skills is not None and "skills" in allowed_fields: + card["skills"] = serialized_skills + + request_params_dump = _dump_request_params(config.default_request_params) + if request_params_dump and "request_params" in allowed_fields: + card["request_params"] = request_params_dump + + if message_paths and "messages" in allowed_fields: + card["messages"] = [str(path) for path in message_paths] + + if card_type == "agent": + child_agents = agent_data.get("child_agents") or [] + if child_agents: + card["agents"] = list(child_agents) + opts = agent_data.get("agents_as_tools_options") or {} + if "history_mode" in opts and opts["history_mode"] is not None: + history_mode = opts["history_mode"] + card["history_mode"] = ( + history_mode.value if hasattr(history_mode, "value") else history_mode + ) + if "max_parallel" in opts and opts["max_parallel"] is not None: + card["max_parallel"] = opts["max_parallel"] + if "child_timeout_sec" in opts and opts["child_timeout_sec"] is not None: + card["child_timeout_sec"] = opts["child_timeout_sec"] + if "max_display_instances" in opts and opts["max_display_instances"] is not None: + card["max_display_instances"] = opts["max_display_instances"] + function_tools = _serialize_string_list(agent_data.get("function_tools")) + if function_tools is not None: + card["function_tools"] = function_tools + tool_hooks = _serialize_string_list(agent_data.get("tool_hooks")) + if tool_hooks is not None: + card["tool_hooks"] = tool_hooks + elif card_type == "chain": + card["sequence"] = list(agent_data.get("sequence") or []) + cumulative = agent_data.get("cumulative", False) + if cumulative: + card["cumulative"] = True + elif card_type == "parallel": + card["fan_out"] = list(agent_data.get("fan_out") or []) + fan_in = agent_data.get("fan_in") + if fan_in: + card["fan_in"] = fan_in + include_request = agent_data.get("include_request") + if include_request is False: + card["include_request"] = False + elif card_type == "evaluator_optimizer": + card["generator"] = agent_data.get("generator") + card["evaluator"] = agent_data.get("evaluator") + if "min_rating" in agent_data: + card["min_rating"] = agent_data.get("min_rating") + if "max_refinements" in agent_data: + card["max_refinements"] = agent_data.get("max_refinements") + if "refinement_instruction" in agent_data: + card["refinement_instruction"] = agent_data.get("refinement_instruction") + elif card_type == "router": + card["agents"] = list(agent_data.get("router_agents") or []) + elif card_type == "orchestrator": + card["agents"] = list(agent_data.get("child_agents") or []) + card["plan_type"] = agent_data.get("plan_type", "full") + card["plan_iterations"] = agent_data.get("plan_iterations", 5) + elif 
card_type == "iterative_planner": + card["agents"] = list(agent_data.get("child_agents") or []) + card["plan_iterations"] = agent_data.get("plan_iterations", -1) + elif card_type == "MAKER": + card["worker"] = agent_data.get("worker") + card["k"] = agent_data.get("k", 3) + card["max_samples"] = agent_data.get("max_samples", 50) + card["match_strategy"] = agent_data.get("match_strategy", "exact") + red_flag = agent_data.get("red_flag_max_length") + if red_flag is not None: + card["red_flag_max_length"] = red_flag + + return card, instruction + + +def _dump_request_params(params: RequestParams | None) -> dict[str, Any] | None: + if params is None: + return None + dump = params.model_dump( + exclude_defaults=True, + exclude={"messages", "systemPrompt", "use_history", "model"}, + ) + return dump or None + + +def _serialize_skills( + skills: Any, +) -> str | list[str] | None: + if skills is None: + return None + if isinstance(skills, Path): + return str(skills) + if isinstance(skills, str): + return skills + if isinstance(skills, list): + serialized: list[str] = [] + for item in skills: + if isinstance(item, Path): + serialized.append(str(item)) + elif isinstance(item, str): + serialized.append(item) + return serialized if serialized else None + return None + + +def _serialize_string_list(value: Any) -> list[str] | None: + if value is None: + return None + if not isinstance(value, list): + return None + if not value: + return [] + if all(isinstance(item, str) for item in value): + return list(value) + return None diff --git a/src/fast_agent/core/direct_factory.py b/src/fast_agent/core/direct_factory.py index 6bef11887..00fb2884c 100644 --- a/src/fast_agent/core/direct_factory.py +++ b/src/fast_agent/core/direct_factory.py @@ -4,6 +4,7 @@ """ from functools import partial +from pathlib import Path from typing import Any, Protocol, TypeVar, cast from fast_agent.agents import McpAgent @@ -29,6 +30,7 @@ ) from fast_agent.llm.model_factory import ModelFactory from fast_agent.mcp.ui_agent import McpAgentWithUI +from fast_agent.tools.function_tool_loader import load_function_tools from fast_agent.types import RequestParams # Type aliases for improved readability and IDE support @@ -44,6 +46,7 @@ def _create_agent_with_ui_if_needed( agent_class: type, config: Any, context: Any, + **kwargs: Any, ) -> Any: """ Create an agent with UI support if MCP UI mode is enabled. 
@@ -52,6 +55,7 @@ def _create_agent_with_ui_if_needed( agent_class: The agent class to potentially enhance with UI config: Agent configuration context: Application context + **kwargs: Additional arguments passed to agent constructor (e.g., tools) Returns: Either a UI-enhanced agent instance or the original agent instance @@ -62,10 +66,10 @@ def _create_agent_with_ui_if_needed( if ui_mode != "disabled" and agent_class == McpAgent: # Use the UI-enhanced agent class instead of the base class - return McpAgentWithUI(config=config, context=context, ui_mode=ui_mode) + return McpAgentWithUI(config=config, context=context, ui_mode=ui_mode, **kwargs) else: # Create the original agent instance - return agent_class(config=config, context=context) + return agent_class(config=config, context=context, **kwargs) class AgentCreatorProtocol(Protocol): @@ -254,11 +258,22 @@ async def create_agents_by_type( }, ) else: + # Load function tools if configured + function_tools = [] + if config.function_tools: + # Use source_path from agent card for relative path resolution + source_path = agent_data.get("source_path") + base_path = Path(source_path).parent if source_path else None + function_tools = load_function_tools( + config.function_tools, base_path + ) + # Create agent with UI support if needed agent = _create_agent_with_ui_if_needed( McpAgent, config, app_instance.context, + tools=function_tools, ) await agent.initialize() diff --git a/src/fast_agent/core/fastagent.py b/src/fast_agent/core/fastagent.py index f7a59cbac..919fe60d2 100644 --- a/src/fast_agent/core/fastagent.py +++ b/src/fast_agent/core/fastagent.py @@ -207,6 +207,41 @@ def __init__( "--skills", help="Path to skills directory to use instead of default skills directories", ) + parser.add_argument( + "--dump", + "--dump-agents", + dest="dump_agents", + help="Export all loaded agents as Markdown AgentCards into a directory", + ) + parser.add_argument( + "--dump-yaml", + "--dump-agents-yaml", + dest="dump_agents_yaml", + help="Export all loaded agents as YAML AgentCards into a directory", + ) + parser.add_argument( + "--dump-agent", + help="Export a single agent by name", + ) + parser.add_argument( + "--dump-agent-path", + help="Output file path for --dump-agent", + ) + parser.add_argument( + "--dump-agent-yaml", + action="store_true", + help="Export a single agent as YAML (default: Markdown)", + ) + parser.add_argument( + "--reload", + action="store_true", + help="Enable manual AgentCard reloads (/reload)", + ) + parser.add_argument( + "--watch", + action="store_true", + help="Watch AgentCard paths and reload when files change", + ) if ignore_unknown_args: known_args, _ = parser.parse_known_args() @@ -309,6 +344,16 @@ def __init__( # Dictionary to store agent configurations from decorators self.agents: dict[str, dict[str, Any]] = {} + # Tracking for AgentCard-loaded agents + self._agent_card_sources: dict[str, Path] = {} + self._agent_card_roots: dict[Path, set[str]] = {} + self._agent_card_root_files: dict[Path, set[Path]] = {} + self._agent_card_file_cache: dict[Path, tuple[int, int]] = {} + self._agent_card_name_by_path: dict[Path, str] = {} + self._agent_card_histories: dict[str, list[Path]] = {} + self._agent_registry_version: int = 0 + self._agent_card_watch_task: asyncio.Task[None] | None = None + self._agent_card_reload_lock: asyncio.Lock | None = None @staticmethod def _normalize_skill_directories( @@ -348,6 +393,255 @@ def context(self) -> Context: """Access the application context""" return self.app.context + def load_agents(self, 
path: str | Path) -> list[str]: + """ + Load AgentCards from a file or directory and register them as agents. + + Loading is idempotent for the provided path: any previously loaded agents + from the same path that are no longer present are removed. + + Returns: + Sorted list of agent names loaded from the provided path. + """ + root = Path(path).expanduser().resolve() + changed = self._load_agent_cards_from_root(root, incremental=False) + if changed: + self._agent_registry_version += 1 + return sorted(self._agent_card_roots.get(root, set())) + + def load_agents_from_url(self, url: str) -> list[str]: + """Load an AgentCard from a URL (markdown or YAML).""" + import tempfile + + from fast_agent.core.agent_card_loader import load_agent_cards + from fast_agent.core.direct_decorators import _fetch_url_content + + content = _fetch_url_content(url) + + # Determine extension from URL + suffix = ".md" + url_lower = url.lower() + if url_lower.endswith((".yaml", ".yml")): + suffix = ".yaml" + elif url_lower.endswith((".md", ".markdown")): + suffix = ".md" + + # Write to temp file and parse + with tempfile.NamedTemporaryFile( + mode="w", suffix=suffix, delete=False, encoding="utf-8" + ) as f: + f.write(content) + temp_path = Path(f.name) + + try: + cards = load_agent_cards(temp_path) + loaded_names = [card.name for card in cards] + for card in cards: + # Check for conflicts + if card.name in self.agents and card.name not in self._agent_card_sources: + raise AgentConfigError( + f"Agent '{card.name}' already exists and is not from AgentCard", + f"URL: {url}", + ) + # Register the agent + self.agents[card.name] = card.agent_data + # Note: URL-loaded cards don't track source path (no reload support) + if card.message_files: + self._agent_card_histories[card.name] = card.message_files + # Apply skills + if cards: + self._apply_skills_to_agent_configs(self._default_skill_manifests) + self._agent_registry_version += 1 + return loaded_names + finally: + temp_path.unlink(missing_ok=True) + + async def reload_agents(self) -> bool: + """Reload all previously registered AgentCard roots.""" + if not self._agent_card_roots: + return False + + if self._agent_card_reload_lock is None: + self._agent_card_reload_lock = asyncio.Lock() + + async with self._agent_card_reload_lock: + changed = False + for root in sorted(self._agent_card_roots.keys()): + if self._load_agent_cards_from_root(root, incremental=True): + changed = True + + if changed: + self._agent_registry_version += 1 + return changed + + def _load_agent_cards_from_root(self, root: Path, *, incremental: bool) -> bool: + from fast_agent.core.agent_card_loader import load_agent_cards + + if not root.exists(): + if incremental: + current_files: set[Path] = set() + else: + raise AgentConfigError(f"AgentCard path not found: {root}") + else: + current_files = self._collect_agent_card_files(root) + + previous_files = self._agent_card_root_files.get(root, set()) + removed_files = previous_files - current_files + + current_stats: dict[Path, tuple[int, int]] = {} + for path_entry in list(current_files): + try: + stat = path_entry.stat() + except FileNotFoundError: + current_files.discard(path_entry) + continue + current_stats[path_entry] = (stat.st_mtime_ns, stat.st_size) + + if incremental: + changed_files = { + path_entry + for path_entry, signature in current_stats.items() + if self._agent_card_file_cache.get(path_entry) != signature + } + else: + changed_files = set(current_stats.keys()) + + cards: list[Any] = [] + for path_entry in sorted(changed_files): + 
cards.extend(load_agent_cards(path_entry)) + + self._apply_agent_card_updates( + root, + current_files=current_files, + removed_files=removed_files, + changed_cards=cards, + current_stats=current_stats, + ) + + return bool(removed_files or changed_files) + + @staticmethod + def _agent_card_extensions() -> set[str]: + return {".md", ".markdown", ".yaml", ".yml"} + + def _collect_agent_card_files(self, root: Path) -> set[Path]: + if root.is_dir(): + extensions = self._agent_card_extensions() + return { + entry + for entry in root.iterdir() + if entry.is_file() and entry.suffix.lower() in extensions + } + + if root.suffix.lower() not in self._agent_card_extensions(): + raise AgentConfigError(f"Unsupported AgentCard file extension: {root}") + return {root} + + def _apply_agent_card_updates( + self, + root: Path, + *, + current_files: set[Path], + removed_files: set[Path], + changed_cards: list[Any], + current_stats: dict[Path, tuple[int, int]], + ) -> None: + removed_names = { + self._agent_card_name_by_path[path] + for path in removed_files + if path in self._agent_card_name_by_path + } + + new_names_by_path: dict[Path, str] = {} + seen_names: dict[str, Path] = {} + for card in changed_cards: + if not hasattr(card, "name") or not hasattr(card, "path"): + raise AgentConfigError("Invalid AgentCard payload during reload") + if card.name in seen_names: + raise AgentConfigError( + f"Duplicate agent name '{card.name}' during reload", + f"Conflicts: {seen_names[card.name]} and {card.path}", + ) + seen_names[card.name] = card.path + new_names_by_path[card.path] = card.name + + existing_source = self._agent_card_sources.get(card.name) + if card.name in self.agents and existing_source is None: + raise AgentConfigError( + f"Agent '{card.name}' already exists and is not loaded from AgentCard", + f"Path: {root}", + ) + if existing_source is not None and existing_source != card.path: + if existing_source in removed_files: + continue + raise AgentConfigError( + f"Agent '{card.name}' already loaded from {existing_source}", + f"Path: {card.path}", + ) + + previous_name = self._agent_card_name_by_path.get(card.path) + if previous_name and previous_name != card.name: + removed_names.add(previous_name) + + for name in sorted(removed_names): + self.agents.pop(name, None) + self._agent_card_sources.pop(name, None) + self._agent_card_histories.pop(name, None) + + for path_entry in removed_files: + self._agent_card_name_by_path.pop(path_entry, None) + self._agent_card_file_cache.pop(path_entry, None) + + for card in changed_cards: + self.agents[card.name] = card.agent_data + + self._agent_card_sources[card.name] = card.path + self._agent_card_name_by_path[card.path] = card.name + + if card.message_files: + self._agent_card_histories[card.name] = card.message_files + else: + self._agent_card_histories.pop(card.name, None) + + for path_entry, signature in current_stats.items(): + self._agent_card_file_cache[path_entry] = signature + + self._agent_card_root_files[root] = set(current_files) + self._agent_card_roots[root] = { + self._agent_card_name_by_path[path_entry] + for path_entry in current_files + if path_entry in self._agent_card_name_by_path + } + + if changed_cards or removed_files: + self._apply_skills_to_agent_configs(self._default_skill_manifests) + + def _get_registry_version(self) -> int: + return self._agent_registry_version + + async def _watch_agent_cards(self) -> None: + roots = sorted(self._agent_card_roots.keys()) + if not roots: + return + + try: + from watchfiles import awatch # type: 
ignore[import-not-found] + + async for _changes in awatch(*roots): + await self.reload_agents() + except ImportError: + logger.info( + "watchfiles not available; falling back to polling for AgentCard reloads" + ) + try: + while True: + await asyncio.sleep(1.0) + await self.reload_agents() + except asyncio.CancelledError: + return + except asyncio.CancelledError: + return + # Decorator methods with precise signatures for IDE completion # Provide annotations so IDEs can discover these attributes on instances if TYPE_CHECKING: # pragma: no cover - typing aid only @@ -625,6 +919,7 @@ async def run(self) -> AsyncIterator["AgentApp"]: ) validate_server_references(self.context, self.agents) validate_workflow_references(self.agents) + self._handle_dump_requests() # Get a model factory function # Now cli_model_override is guaranteed to be defined @@ -661,7 +956,9 @@ def model_factory_func(model=None, request_params=None): if context_variables: global_prompt_context = context_variables - async def instantiate_agent_instance() -> AgentInstance: + async def instantiate_agent_instance( + app_override: AgentApp | None = None, + ) -> AgentInstance: async with instance_lock: agents_map = await create_agents_in_dependency_order( self.app, @@ -669,8 +966,18 @@ async def instantiate_agent_instance() -> AgentInstance: model_factory_func, ) validate_provider_keys_post_creation(agents_map) - instance = AgentInstance(AgentApp(agents_map), agents_map) + if app_override is None: + app = AgentApp(agents_map) + else: + app_override.set_agents(agents_map) + app = app_override + instance = AgentInstance( + app, + agents_map, + registry_version=self._agent_registry_version, + ) managed_instances.append(instance) + self._apply_agent_card_histories(instance.agents) if global_prompt_context: self._apply_instruction_context(instance, global_prompt_context) return instance @@ -685,6 +992,52 @@ async def dispose_agent_instance(instance: AgentInstance) -> None: wrapper = primary_instance.app active_agents = primary_instance.agents + async def refresh_shared_instance() -> bool: + nonlocal primary_instance, active_agents + if self._agent_registry_version <= primary_instance.registry_version: + return False + + new_instance = await instantiate_agent_instance(app_override=wrapper) + old_instance = primary_instance + primary_instance = new_instance + active_agents = new_instance.agents + await dispose_agent_instance(old_instance) + return True + + async def reload_and_refresh() -> bool: + changed = await self.reload_agents() + if not changed: + return False + return await refresh_shared_instance() + + async def load_card_and_refresh(source: str) -> list[str]: + if source.startswith(("http://", "https://")): + loaded_names = self.load_agents_from_url(source) + else: + loaded_names = self.load_agents(source) + await refresh_shared_instance() + return loaded_names + + async def load_card_source(source: str) -> list[str]: + if source.startswith(("http://", "https://")): + return self.load_agents_from_url(source) + return self.load_agents(source) + + reload_enabled = bool( + getattr(self.args, "reload", False) + or getattr(self.args, "watch", False) + ) + wrapper.set_reload_callback(reload_and_refresh if reload_enabled else None) + wrapper.set_refresh_callback( + refresh_shared_instance if reload_enabled else None + ) + wrapper.set_load_card_callback(load_card_and_refresh) + + if getattr(self.args, "watch", False) and self._agent_card_roots: + self._agent_card_watch_task = asyncio.create_task( + self._watch_agent_cards() + ) + 
self._server_instance_factory = instantiate_agent_instance self._server_instance_dispose = dispose_agent_instance self._server_managed_instances = managed_instances @@ -747,8 +1100,10 @@ async def dispose_agent_instance(instance: AgentInstance) -> None: dispose_instance=self._server_instance_dispose, instance_scope=instance_scope, server_name=server_name or f"{self.name}", + get_registry_version=self._get_registry_version, skills_directory_override=skills_override, permissions_enabled=permissions_enabled, + load_card_callback=load_card_source, ) # Run the ACP server (this is a blocking call) @@ -769,6 +1124,7 @@ async def dispose_agent_instance(instance: AgentInstance) -> None: server_name=server_name or f"{self.name}-MCP-Server", server_description=server_description, tool_description=tool_description, + get_registry_version=self._get_registry_version, ) # Run the server directly (this is a blocking call) @@ -875,6 +1231,14 @@ async def dispose_agent_instance(instance: AgentInstance) -> None: except: # noqa: E722 pass + if self._agent_card_watch_task is not None: + self._agent_card_watch_task.cancel() + try: + await self._agent_card_watch_task + except asyncio.CancelledError: + pass + self._agent_card_watch_task = None + # Print usage report before cleanup (show for user exits too) if ( getattr(self, "_server_managed_instances", None) @@ -927,6 +1291,86 @@ def _apply_instruction_context( # Use set_instruction() which handles syncing request_params and LLM agent.set_instruction(resolved) + def _apply_agent_card_histories(self, agents: dict[str, "AgentProtocol"]) -> None: + if not self._agent_card_histories: + return + for name, history_files in self._agent_card_histories.items(): + agent = agents.get(name) + if agent is None: + continue + messages: list[PromptMessageExtended] = [] + for history_file in history_files: + messages.extend(load_prompt(history_file)) + agent.clear(clear_prompts=True) + agent.message_history.extend(messages) + + def _handle_dump_requests(self) -> None: + dump_dir = getattr(self.args, "dump_agents", None) + dump_dir_yaml = getattr(self.args, "dump_agents_yaml", None) + dump_agent = getattr(self.args, "dump_agent", None) + dump_agent_path = getattr(self.args, "dump_agent_path", None) + dump_agent_yaml = getattr(self.args, "dump_agent_yaml", False) + + if dump_dir and dump_dir_yaml: + raise AgentConfigError( + "Only one of --dump or --dump-yaml may be set" + ) + + if dump_agent and dump_agent_path is None: + raise AgentConfigError("--dump-agent-path is required with --dump-agent") + if dump_agent_path is not None and not dump_agent: + raise AgentConfigError("--dump-agent is required with --dump-agent-path") + + if dump_agent and (dump_dir or dump_dir_yaml): + raise AgentConfigError( + "Use either --dump-agent or --dump/--dump-yaml, not both" + ) + + if not (dump_dir or dump_dir_yaml or dump_agent): + return + + if dump_dir or dump_dir_yaml: + output_dir_raw = dump_dir if dump_dir is not None else dump_dir_yaml + if output_dir_raw is None: + raise AgentConfigError("Missing output directory for agent dump") + output_dir = Path(output_dir_raw) + self._dump_agents_to_dir(output_dir, as_yaml=bool(dump_dir_yaml)) + raise SystemExit(0) + + if dump_agent: + if dump_agent_path is None: + raise AgentConfigError("--dump-agent-path is required with --dump-agent") + output_path = Path(dump_agent_path) + self._dump_single_agent(dump_agent, output_path, as_yaml=dump_agent_yaml) + raise SystemExit(0) + + def _dump_agents_to_dir(self, output_dir: Path, *, as_yaml: bool) -> None: + 
from fast_agent.core.agent_card_loader import dump_agents_to_dir + + dump_agents_to_dir( + self.agents, + output_dir, + as_yaml=as_yaml, + message_map=self._agent_card_histories, + ) + + def _dump_single_agent(self, name: str, output_path: Path, *, as_yaml: bool) -> None: + from fast_agent.core.agent_card_loader import dump_agent_to_path + + if name not in self.agents: + raise AgentConfigError( + f"Agent '{name}' not found for dump", + f"Available agents: {', '.join(self.agents.keys())}", + ) + message_paths = self._agent_card_histories.get(name) + dump_agent_to_path( + name, + self.agents[name], + output_path, + as_yaml=as_yaml, + message_paths=message_paths, + ) + def _apply_skills_to_agent_configs(self, default_skills: list[SkillManifest]) -> None: self._default_skill_manifests = list(default_skills) @@ -1206,6 +1650,7 @@ async def app_main(): class AgentInstance: app: AgentApp agents: dict[str, "AgentProtocol"] + registry_version: int = 0 async def shutdown(self) -> None: for agent in self.agents.values(): diff --git a/src/fast_agent/interfaces.py b/src/fast_agent/interfaces.py index 5ed3a40fa..a87c13429 100644 --- a/src/fast_agent/interfaces.py +++ b/src/fast_agent/interfaces.py @@ -249,6 +249,7 @@ async def show_assistant_message( name: str | None = None, model: str | None = None, additional_message: Text | None = None, + render_markdown: bool | None = None, ) -> None: ... async def attach_llm( diff --git a/src/fast_agent/llm/provider/openai/llm_azure.py b/src/fast_agent/llm/provider/openai/llm_azure.py index c123a4e2e..3010d4a21 100644 --- a/src/fast_agent/llm/provider/openai/llm_azure.py +++ b/src/fast_agent/llm/provider/openai/llm_azure.py @@ -1,13 +1,19 @@ +from typing import Any + +try: + from azure.identity import ( # ty: ignore[unresolved-import] + DefaultAzureCredential as _DefaultAzureCredential, + ) +except ImportError: + _DefaultAzureCredential = None # type: ignore[assignment] + from openai import AsyncAzureOpenAI, AsyncOpenAI, AuthenticationError from fast_agent.core.exceptions import ProviderKeyError from fast_agent.llm.provider.openai.llm_openai import OpenAILLM from fast_agent.llm.provider_types import Provider -try: - from azure.identity import DefaultAzureCredential # ty: ignore[unresolved-import] -except ImportError: - DefaultAzureCredential = None +DefaultAzureCredential: type[Any] | None = _DefaultAzureCredential def _extract_resource_name(url: str) -> str | None: diff --git a/src/fast_agent/mcp/server/agent_server.py b/src/fast_agent/mcp/server/agent_server.py index 8ba6b8977..19f5d61d8 100644 --- a/src/fast_agent/mcp/server/agent_server.py +++ b/src/fast_agent/mcp/server/agent_server.py @@ -50,12 +50,18 @@ def __init__( server_name: str = "FastAgent-MCP-Server", server_description: str | None = None, tool_description: str | None = None, + get_registry_version: Callable[[], int] | None = None, ) -> None: """Initialize the server with the provided agent app.""" self.primary_instance = primary_instance self._create_instance_task = create_instance self._dispose_instance_task = dispose_instance self._instance_scope = instance_scope + self._get_registry_version = get_registry_version + self._primary_registry_version = getattr(primary_instance, "registry_version", 0) + self._shared_instance_lock = asyncio.Lock() + self._shared_active_requests = 0 + self._stale_instances: list[AgentInstance] = [] self.mcp_server: FastMCP = FastMCP( name=server_name, instructions=server_description @@ -66,6 +72,7 @@ def __init__( self.mcp_server.settings.stateless_http = True 
self._tool_description = tool_description self._shared_instance_active = True + self._registered_agents: set[str] = set(primary_instance.agents.keys()) # Shutdown coordination self._graceful_shutdown_event = asyncio.Event() self._force_shutdown_event = asyncio.Event() @@ -103,6 +110,7 @@ def setup_tools(self) -> None: def register_agent_tools(self, agent_name: str) -> None: """Register tools for a specific agent.""" + self._registered_agents.add(agent_name) # Basic send message tool tool_description = ( @@ -111,9 +119,17 @@ def register_agent_tools(self, agent_name: str) -> None: else self._tool_description ) + agent = self.primary_instance.agents.get(agent_name) + agent_description = None + if agent is not None: + config = getattr(agent, "config", None) + agent_description = getattr(config, "description", None) + @self.mcp_server.tool( name=f"{agent_name}_send", - description=tool_description or f"Send a message to the {agent_name} agent", + description=tool_description + or agent_description + or f"Send a message to the {agent_name} agent", structured_output=False, # MCP 1.10.1 turns every tool in to a structured output ) @@ -187,6 +203,8 @@ async def get_history_prompt(ctx: MCPContext) -> list: async def _acquire_instance(self, ctx: MCPContext | None) -> AgentInstance: if self._instance_scope == "shared": + await self._maybe_refresh_shared_instance() + self._shared_active_requests += 1 return self.primary_instance if self._instance_scope == "request": @@ -210,7 +228,11 @@ async def _release_instance( *, reuse_connection: bool = False, ) -> None: - if self._instance_scope == "request": + if self._instance_scope == "shared": + if self._shared_active_requests > 0: + self._shared_active_requests -= 1 + await self._dispose_stale_instances_if_idle() + elif self._instance_scope == "request": await self._dispose_instance_task(instance) elif self._instance_scope == "connection" and reuse_connection is False: # Connection-scoped instances persist until session cleanup @@ -239,6 +261,40 @@ def _session_identifier(self, ctx: MCPContext | None) -> str | None: return request.headers.get("mcp-session-id") return None + async def _maybe_refresh_shared_instance(self) -> None: + if not self._get_registry_version: + return + latest_version = self._get_registry_version() + if latest_version <= self._primary_registry_version: + return + + async with self._shared_instance_lock: + latest_version = self._get_registry_version() + if latest_version <= self._primary_registry_version: + return + + new_instance = await self._create_instance_task() + old_instance = self.primary_instance + self.primary_instance = new_instance + self._primary_registry_version = getattr(new_instance, "registry_version", latest_version) + self._stale_instances.append(old_instance) + + new_agents = set(new_instance.agents.keys()) + missing = new_agents - self._registered_agents + for agent_name in sorted(missing): + self.register_agent_tools(agent_name) + + async def _dispose_stale_instances_if_idle(self) -> None: + if self._shared_active_requests: + return + if not self._stale_instances: + return + + stale = list(self._stale_instances) + self._stale_instances.clear() + for instance in stale: + await self._dispose_instance_task(instance) + async def _dispose_primary_instance(self) -> None: if self._shared_instance_active: try: @@ -246,6 +302,14 @@ async def _dispose_primary_instance(self) -> None: finally: self._shared_instance_active = False + async def _dispose_all_stale_instances(self) -> None: + if not self._stale_instances: + return 
+        stale = list(self._stale_instances)
+        self._stale_instances.clear()
+        for instance in stale:
+            await self._dispose_instance_task(instance)
+
     async def _dispose_all_connection_instances(self) -> None:
         pending_cleanups = list(self._connection_cleanup_tasks.values())
         self._connection_cleanup_tasks.clear()
@@ -589,6 +653,7 @@ async def _cleanup_stdio(self):
         logger.info("Performing minimal STDIO cleanup")
 
         await self._dispose_primary_instance()
+        await self._dispose_all_stale_instances()
         await self._dispose_all_connection_instances()
 
         logger.info("STDIO cleanup complete")
@@ -616,6 +681,7 @@ async def shutdown(self):
 
             # Dispose shared instance if still active
            await self._dispose_primary_instance()
+            await self._dispose_all_stale_instances()
         except Exception as e:
             # Log any errors but don't let them prevent shutdown
             logger.error(f"Error during shutdown: {e}", exc_info=True)
diff --git a/src/fast_agent/mcp/ui_mixin.py b/src/fast_agent/mcp/ui_mixin.py
index 98f8f84c7..51b8653f8 100644
--- a/src/fast_agent/mcp/ui_mixin.py
+++ b/src/fast_agent/mcp/ui_mixin.py
@@ -95,6 +95,7 @@ async def show_assistant_message(
         name: str | None = None,
         model: str | None = None,
         additional_message: "Text" | None = None,
+        render_markdown: bool | None = None,
     ) -> None:
         """Override to display UI resources after showing assistant message."""
         # Show the assistant message normally via parent
@@ -106,6 +107,7 @@ async def show_assistant_message(
             name=name,
             model=model,
             additional_message=additional_message,
+            render_markdown=render_markdown,
         )
 
         # Handle any pending UI resources from the previous user message
diff --git a/src/fast_agent/tools/function_tool_loader.py b/src/fast_agent/tools/function_tool_loader.py
new file mode 100644
index 000000000..e4d1a1862
--- /dev/null
+++ b/src/fast_agent/tools/function_tool_loader.py
@@ -0,0 +1,130 @@
+"""
+Dynamic function tool loader.
+
+Loads Python functions from files for use as agent tools.
+Supports both direct callables and string specs like "module.py:function_name".
+"""
+
+import importlib.util
+from collections.abc import Callable
+from pathlib import Path
+from typing import Any
+
+from mcp.server.fastmcp.tools.base import Tool as FastMCPTool
+
+from fast_agent.core.exceptions import AgentConfigError
+from fast_agent.core.logging.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+def load_function_from_spec(spec: str, base_path: Path | None = None) -> Callable[..., Any]:
+    """
+    Load a Python function from a spec string.
+
+    Args:
+        spec: A string in the format "module.py:function_name" or "path/to/module.py:function_name"
+        base_path: Optional base path for resolving relative module paths.
+            If None, uses current working directory.
+
+    Returns:
+        The loaded callable function.
+
+    Raises:
+        AgentConfigError: If the spec format is invalid or the tool cannot be loaded.
+    """
+    if ":" not in spec:
+        raise AgentConfigError(
+            f"Invalid function tool spec '{spec}'. Expected format: 'module.py:function_name'"
+        )
+
+    module_path_str, func_name = spec.rsplit(":", 1)
+    module_path = Path(module_path_str)
+
+    # Resolve relative paths
+    if not module_path.is_absolute():
+        if base_path is not None:
+            module_path = (base_path / module_path).resolve()
+        else:
+            module_path = Path.cwd() / module_path
+
+    if not module_path.exists():
+        raise AgentConfigError(
+            f"Function tool module file not found for '{spec}'",
+            f"Resolved path: {module_path}",
+        )
+
+    # Generate a unique module name to avoid conflicts
+    module_name = f"_function_tool_{module_path.stem}_{id(spec)}"
+
+    # Load the module dynamically
+    spec_obj = importlib.util.spec_from_file_location(module_name, module_path)
+    if spec_obj is None or spec_obj.loader is None:
+        raise AgentConfigError(
+            f"Failed to create module spec for '{spec}'",
+            f"Resolved path: {module_path}",
+        )
+
+    module = importlib.util.module_from_spec(spec_obj)
+    try:
+        spec_obj.loader.exec_module(module)
+    except Exception as exc:  # noqa: BLE001
+        raise AgentConfigError(
+            f"Failed to import function tool module for '{spec}'",
+            str(exc),
+        ) from exc
+
+    # Get the function from the module
+    if not hasattr(module, func_name):
+        raise AgentConfigError(
+            f"Function '{func_name}' not found for '{spec}'",
+            f"Module path: {module_path}",
+        )
+
+    func = getattr(module, func_name)
+    if not callable(func):
+        raise AgentConfigError(
+            f"Function '{func_name}' is not callable for '{spec}'",
+            f"Module path: {module_path}",
+        )
+
+    return func
+
+
+def load_function_tools(
+    tools_config: list[Callable[..., Any] | str] | None,
+    base_path: Path | None = None,
+) -> list[FastMCPTool]:
+    """
+    Load function tools from a config list.
+
+    Args:
+        tools_config: List of either:
+            - Callable functions (used directly)
+            - String specs like "module.py:function_name" (loaded dynamically)
+        base_path: Base path for resolving relative module paths in string specs.
+
+    Returns:
+        List of FastMCPTool objects ready for use with an agent.
+ """ + if not tools_config: + return [] + + result: list[FastMCPTool] = [] + + for tool_spec in tools_config: + try: + if callable(tool_spec): + # Direct callable - wrap it + result.append(FastMCPTool.from_function(tool_spec)) + elif isinstance(tool_spec, str): + # String spec - load and wrap + func = load_function_from_spec(tool_spec, base_path) + result.append(FastMCPTool.from_function(func)) + else: + logger.warning(f"Skipping invalid function tool config: {tool_spec}") + except Exception as e: + logger.error(f"Failed to load function tool '{tool_spec}': {e}") + raise + + return result diff --git a/src/fast_agent/ui/command_payloads.py b/src/fast_agent/ui/command_payloads.py index 86f013715..230ac7204 100644 --- a/src/fast_agent/ui/command_payloads.py +++ b/src/fast_agent/ui/command_payloads.py @@ -86,6 +86,19 @@ class LoadHistoryCommand(CommandBase): kind: Literal["load_history"] = "load_history" +@dataclass(frozen=True, slots=True) +class LoadAgentCardCommand(CommandBase): + filename: str | None + add_tool: bool + error: str | None + kind: Literal["load_agent_card"] = "load_agent_card" + + +@dataclass(frozen=True, slots=True) +class ReloadAgentsCommand(CommandBase): + kind: Literal["reload_agents"] = "reload_agents" + + CommandPayload = ( ShowUsageCommand | ShowSystemCommand @@ -101,6 +114,8 @@ class LoadHistoryCommand(CommandBase): | SwitchAgentCommand | SaveHistoryCommand | LoadHistoryCommand + | LoadAgentCardCommand + | ReloadAgentsCommand ) diff --git a/src/fast_agent/ui/console.py b/src/fast_agent/ui/console.py index dbc506f5b..6b239ded1 100644 --- a/src/fast_agent/ui/console.py +++ b/src/fast_agent/ui/console.py @@ -10,7 +10,7 @@ from __future__ import annotations import os -from typing import Literal +from typing import IO, Literal from rich.console import Console @@ -25,6 +25,54 @@ def _create_console(stderr: bool) -> Console: return Console(stderr=stderr, color_system="auto") +# When uvloop registers a reader, it makes the file description non-blocking +# and doesn't restore it. If stdin/stdout/stderr share the same TTY, writes +# can raise BlockingIOError. Use a dedicated blocking TTY stream when needed. +_blocking_console_file: IO[str] | None = None + + +def _open_blocking_tty(stream: IO[str]) -> IO[str] | None: + try: + fd = stream.fileno() + except Exception: + return None + if not os.isatty(fd): + return None + try: + tty_path = os.ttyname(fd) + except OSError: + tty_path = "/dev/tty" + try: + tty_fd = os.open(tty_path, os.O_WRONLY | os.O_NOCTTY) + except OSError: + return None + try: + os.set_blocking(tty_fd, True) + except Exception: + pass + return os.fdopen(tty_fd, "w", buffering=1, encoding="utf-8", errors="replace") + + +def ensure_blocking_console() -> None: + """ + Ensure the shared console writes to a blocking TTY stream when stdout/stderr + has been made non-blocking by the event loop. 
+ """ + global _blocking_console_file + + current_file = console.file + try: + if os.get_blocking(current_file.fileno()): + return + except Exception: + return + + if _blocking_console_file is None or _blocking_console_file.closed: + _blocking_console_file = _open_blocking_tty(current_file) + if _blocking_console_file is not None: + console.file = _blocking_console_file + + # Allow forcing stderr via env (useful for ACP/stdio wrappers that import fast_agent early) _default_stderr = _env_truthy(os.environ.get("FAST_AGENT_FORCE_STDERR")) @@ -43,6 +91,7 @@ def configure_console_stream(stream: Literal["stdout", "stderr"]) -> None: # Reset the underlying stream selection so Console.file uses the new stderr flag console._file = None # type: ignore[attr-defined] console.stderr = target_is_stderr + ensure_blocking_console() # Error console for application errors diff --git a/src/fast_agent/ui/console_display.py b/src/fast_agent/ui/console_display.py index 0874a9c36..406d95fa8 100644 --- a/src/fast_agent/ui/console_display.py +++ b/src/fast_agent/ui/console_display.py @@ -168,6 +168,7 @@ def display_message( truncate_content: bool = True, additional_message: Text | None = None, pre_content: Text | Group | None = None, + render_markdown: bool | None = None, ) -> None: """ Unified method to display formatted messages to the console. @@ -184,6 +185,7 @@ def display_message( truncate_content: Whether to truncate long content additional_message: Optional Rich Text appended after the main content pre_content: Optional Rich Text shown before the main content + render_markdown: Force markdown rendering (True) or plain rendering (False) """ # Get configuration for this message type config = MESSAGE_CONFIGS[message_type] @@ -212,7 +214,12 @@ def display_message( else: console.console.print(pre_content, markup=self._markup) self._display_content( - content, truncate_content, is_error, message_type, check_markdown_markers=False + content, + truncate_content, + is_error, + message_type, + check_markdown_markers=False, + render_markdown=render_markdown, ) if additional_message: console.console.print(additional_message, markup=self._markup) @@ -232,6 +239,7 @@ def _display_content( is_error: bool = False, message_type: MessageType | None = None, check_markdown_markers: bool = False, + render_markdown: bool | None = None, ) -> None: """ Display content in the appropriate format. 
@@ -242,6 +250,7 @@ def _display_content( is_error: Whether this is error content (affects styling) message_type: Type of message to determine appropriate styling check_markdown_markers: If True, only use markdown rendering when markers are present + render_markdown: If set, force markdown rendering (True) or plain rendering (False) """ import json import re @@ -263,6 +272,36 @@ def _display_content( # Handle different content types if isinstance(content, str): + if render_markdown is not None: + try: + json_obj = json.loads(content) + if truncate and self.config and self.config.logger.truncate_tools: + pretty_obj = Pretty(json_obj, max_length=10, max_string=50) + else: + pretty_obj = Pretty(json_obj) + if style: + console.console.print(pretty_obj, style=style, markup=self._markup) + else: + console.console.print(pretty_obj, markup=self._markup) + return + except (JSONDecodeError, TypeError, ValueError): + if render_markdown: + prepared_content = prepare_markdown_content(content, self._escape_xml) + md = Markdown(prepared_content, code_theme=CODE_STYLE) + console.console.print(md, markup=self._markup) + else: + if ( + truncate + and self.config + and self.config.logger.truncate_tools + and len(content) > 360 + ): + content = content[:360] + "..." + if style: + console.console.print(content, style=style, markup=self._markup) + else: + console.console.print(content, markup=self._markup) + return # Try to detect and handle different string formats try: # Try as JSON first @@ -338,6 +377,27 @@ def _display_content( else: console.console.print(content, markup=self._markup) elif isinstance(content, Text): + if render_markdown is not None: + plain_text = content.plain + try: + json_obj = json.loads(plain_text) + if truncate and self.config and self.config.logger.truncate_tools: + pretty_obj = Pretty(json_obj, max_length=10, max_string=50) + else: + pretty_obj = Pretty(json_obj) + if style: + console.console.print(pretty_obj, style=style, markup=self._markup) + else: + console.console.print(pretty_obj, markup=self._markup) + return + except (JSONDecodeError, TypeError, ValueError): + if render_markdown: + prepared_content = prepare_markdown_content(plain_text, self._escape_xml) + md = Markdown(prepared_content, code_theme=CODE_STYLE) + console.console.print(md, markup=self._markup) + else: + console.console.print(content, markup=self._markup) + return # Rich Text object - check if it contains markdown plain_text = content.plain @@ -706,6 +766,7 @@ async def show_assistant_message( name: str | None = None, model: str | None = None, additional_message: Text | None = None, + render_markdown: bool | None = None, ) -> None: """Display an assistant message in a formatted panel. 
@@ -718,6 +779,7 @@ async def show_assistant_message( name: Optional agent name model: Optional model name for right info additional_message: Optional additional styled message to append + render_markdown: Force markdown rendering (True) or plain rendering (False) """ if self.config and not self.config.logger.show_chat: return @@ -748,6 +810,7 @@ async def show_assistant_message( truncate_content=False, # Assistant messages shouldn't be truncated additional_message=additional_message, pre_content=pre_content, + render_markdown=render_markdown, ) # Handle mermaid diagrams separately (after the main message) diff --git a/src/fast_agent/ui/enhanced_prompt.py b/src/fast_agent/ui/enhanced_prompt.py index 6c57d1f47..c1424a108 100644 --- a/src/fast_agent/ui/enhanced_prompt.py +++ b/src/fast_agent/ui/enhanced_prompt.py @@ -33,7 +33,9 @@ ClearCommand, CommandPayload, ListToolsCommand, + LoadAgentCardCommand, LoadHistoryCommand, + ReloadAgentsCommand, SaveHistoryCommand, SelectPromptCommand, ShowHistoryCommand, @@ -110,6 +112,16 @@ def _load_history_cmd(filename: str | None, error: str | None) -> LoadHistoryCom return LoadHistoryCommand(filename=filename, error=error) +def _load_agent_card_cmd( + filename: str | None, add_tool: bool, error: str | None +) -> LoadAgentCardCommand: + return LoadAgentCardCommand(filename=filename, add_tool=add_tool, error=error) + + +def _reload_agents_cmd() -> ReloadAgentsCommand: + return ReloadAgentsCommand() + + def _select_prompt_cmd( prompt_index: int | None, prompt_name: str | None ) -> SelectPromptCommand: @@ -441,6 +453,8 @@ def __init__( "markdown": "Show last assistant message without markdown formatting", "save_history": "Save history; .json = MCP JSON, others = Markdown", "load_history": "Load history from a file", + "card": "Load an AgentCard (add --tool to expose as tool)", + "reload": "Reload AgentCards from disk", "help": "Show commands and shortcuts", "EXIT": "Exit fast-agent, terminating any running workflows", "STOP": "Stop this prompting session and move to next workflow step", @@ -512,6 +526,56 @@ def _complete_history_files(self, partial: str): except PermissionError: pass # Skip directories we can't read + def _complete_agent_card_files(self, partial: str): + """Generate completions for AgentCard files (.md/.markdown/.yaml/.yml).""" + from pathlib import Path + + if partial: + partial_path = Path(partial) + if partial.endswith("/") or partial.endswith(os.sep): + search_dir = partial_path + prefix = "" + else: + search_dir = partial_path.parent if partial_path.parent != partial_path else Path(".") + prefix = partial_path.name + else: + search_dir = Path(".") + prefix = "" + + if not search_dir.exists(): + return + + card_extensions = {".md", ".markdown", ".yaml", ".yml"} + try: + for entry in sorted(search_dir.iterdir()): + name = entry.name + if name.startswith("."): + continue + if not name.lower().startswith(prefix.lower()): + continue + + if search_dir == Path("."): + completion_text = name + else: + completion_text = str(search_dir / name) + + if entry.is_dir(): + yield Completion( + completion_text + "/", + start_position=-len(partial), + display=name + "/", + display_meta="directory", + ) + elif entry.is_file() and entry.suffix.lower() in card_extensions: + yield Completion( + completion_text, + start_position=-len(partial), + display=name, + display_meta="AgentCard", + ) + except PermissionError: + pass # Skip directories we can't read + def get_completions(self, document, complete_event): """Synchronous completions method - this is what 
prompt_toolkit expects by default""" text = document.text_before_cursor @@ -528,6 +592,11 @@ def get_completions(self, document, complete_event): yield from self._complete_history_files(partial) return + if text_lower.startswith("/card "): + partial = text[len("/card "):] + yield from self._complete_agent_card_files(partial) + return + # Complete commands if text_lower.startswith("/"): cmd = text_lower[1:] @@ -807,6 +876,27 @@ def parse_special_input(text: str) -> str | CommandPayload: if not filename: return _load_history_cmd(None, "Filename required for load_history") return _load_history_cmd(filename, None) + if cmd == "card": + remainder = cmd_parts[1].strip() if len(cmd_parts) > 1 else "" + if not remainder: + return _load_agent_card_cmd(None, False, "Filename required for /card") + try: + tokens = shlex.split(remainder) + except ValueError as exc: + return _load_agent_card_cmd(None, False, f"Invalid arguments: {exc}") + add_tool = False + filename = None + for token in tokens: + if token in {"tool", "--tool", "--as-tool", "-t"}: + add_tool = True + continue + if filename is None: + filename = token + if not filename: + return _load_agent_card_cmd(None, add_tool, "Filename required for /card") + return _load_agent_card_cmd(filename, add_tool, None) + if cmd == "reload": + return _reload_agents_cmd() if cmd in ("mcpstatus", "mcp"): return _show_mcp_status_cmd() if cmd == "prompt": @@ -1415,6 +1505,8 @@ async def handle_special_commands( " [dim]Default: Timestamped filename (e.g., 25_01_15_14_30-conversation.json)[/dim]" ) rich_print(" /load_history - Load chat history from a file") + rich_print(" /card [--tool] - Load an AgentCard") + rich_print(" /reload - Reload AgentCards from disk") rich_print(" @agent_name - Switch to agent") rich_print(" STOP - Return control back to the workflow") rich_print(" EXIT - Exit fast-agent, terminating any running workflows") diff --git a/src/fast_agent/ui/interactive_prompt.py b/src/fast_agent/ui/interactive_prompt.py index 06ab19438..06677b38a 100644 --- a/src/fast_agent/ui/interactive_prompt.py +++ b/src/fast_agent/ui/interactive_prompt.py @@ -53,7 +53,9 @@ ListPromptsCommand, ListSkillsCommand, ListToolsCommand, + LoadAgentCardCommand, LoadHistoryCommand, + ReloadAgentsCommand, SaveHistoryCommand, SelectPromptCommand, ShowHistoryCommand, @@ -347,6 +349,105 @@ async def prompt_loop( except Exception as e: rich_print(f"[red]Error loading history: {e}[/red]") continue + case LoadAgentCardCommand( + filename=filename, add_tool=add_tool, error=error + ): + if error: + rich_print(f"[red]{error}[/red]") + continue + + if filename is None: + rich_print("[red]Filename required for /card[/red]") + continue + + if not prompt_provider.can_load_agent_cards(): + rich_print( + "[yellow]AgentCard loading is not available in this session.[/yellow]" + ) + continue + + try: + loaded_names = await prompt_provider.load_agent_card( + filename + ) + except Exception as exc: + rich_print(f"[red]AgentCard load failed: {exc}[/red]") + continue + + available_agents = list(prompt_provider.agent_names()) + available_agents_set = set(available_agents) + self.agent_types = prompt_provider.agent_types() + + if agent not in available_agents_set: + if available_agents: + agent = available_agents[0] + else: + rich_print("[red]No agents available after load.[/red]") + return result + + if not loaded_names: + rich_print("[green]AgentCard loaded.[/green]") + else: + name_list = ", ".join(loaded_names) + rich_print(f"[green]Loaded AgentCard(s): {name_list}[/green]") + + if add_tool: + 
parent = prompt_provider._agent(agent) + add_tool_fn = getattr(parent, "add_agent_tool", None) + if not callable(add_tool_fn): + rich_print( + "[yellow]Current agent does not support tool injection.[/yellow]" + ) + continue + + added_tools: list[str] = [] + for child_name in loaded_names: + try: + child = prompt_provider._agent(child_name) + except Exception: + continue + tool_name = add_tool_fn(child) + added_tools.append(tool_name) + + if added_tools: + tool_list = ", ".join(added_tools) + rich_print( + f"[green]Added tool(s): {tool_list}[/green]" + ) + continue + case ReloadAgentsCommand(): + if not prompt_provider.can_reload_agents(): + rich_print( + "[yellow]Reload is not available in this session.[/yellow]" + ) + continue + + reloadable = prompt_provider.reload_agents + try: + changed = await reloadable() + except Exception as exc: + rich_print(f"[red]Reload failed: {exc}[/red]") + continue + + if not changed: + rich_print("[dim]No AgentCard changes detected.[/dim]") + continue + + available_agents = list(prompt_provider.agent_names()) + available_agents_set = set(available_agents) + self.agent_types = prompt_provider.agent_types() + + if agent not in available_agents_set: + if available_agents: + agent = available_agents[0] + else: + rich_print( + "[red]No agents available after reload.[/red]" + ) + return result + + rich_print("[green]AgentCards reloaded.[/green]") + continue case _: pass diff --git a/src/fast_agent/ui/rich_progress.py b/src/fast_agent/ui/rich_progress.py index a176912d9..7d8737dd8 100644 --- a/src/fast_agent/ui/rich_progress.py +++ b/src/fast_agent/ui/rich_progress.py @@ -9,6 +9,7 @@ from fast_agent.event_progress import ProgressAction, ProgressEvent from fast_agent.ui.console import console as default_console +from fast_agent.ui.console import ensure_blocking_console class RichProgressDisplay: @@ -33,11 +34,12 @@ def __init__(self, console: Console | None = None) -> None: def start(self) -> None: """start""" - + ensure_blocking_console() self._progress.start() def stop(self) -> None: """Stop and clear the progress display.""" + ensure_blocking_console() # Set paused first to prevent race with incoming updates self._paused = True # Hide all tasks before stopping (like pause does) @@ -48,6 +50,7 @@ def stop(self) -> None: def pause(self) -> None: """Pause the progress display.""" if not self._paused: + ensure_blocking_console() self._paused = True for task in self._progress.tasks: task.visible = False @@ -56,6 +59,7 @@ def pause(self) -> None: def resume(self) -> None: """Resume the progress display.""" if self._paused: + ensure_blocking_console() for task in self._progress.tasks: task.visible = True self._paused = False diff --git a/tests/integration/function_tools/agent.md b/tests/integration/function_tools/agent.md new file mode 100644 index 000000000..8fee13c55 --- /dev/null +++ b/tests/integration/function_tools/agent.md @@ -0,0 +1,8 @@ +--- +type: agent +name: calc +function_tools: + - tools.py:add +model: passthrough +instruction: Calculator agent. 
+---
diff --git a/tests/integration/function_tools/fastagent.config.yaml b/tests/integration/function_tools/fastagent.config.yaml
new file mode 100644
index 000000000..61dd58473
--- /dev/null
+++ b/tests/integration/function_tools/fastagent.config.yaml
@@ -0,0 +1 @@
+default_model: passthrough
diff --git a/tests/integration/function_tools/test_function_tools.py b/tests/integration/function_tools/test_function_tools.py
new file mode 100644
index 000000000..f7c7ca8a9
--- /dev/null
+++ b/tests/integration/function_tools/test_function_tools.py
@@ -0,0 +1,10 @@
+import pytest
+
+
+@pytest.mark.integration
+@pytest.mark.asyncio
+async def test_function_tools_from_card(fast_agent):
+    fast_agent.load_agents("agent.md")
+    async with fast_agent.run() as agent:
+        tools = await agent.calc.list_tools()
+        assert any(t.name == "add" for t in tools.tools)
diff --git a/tests/integration/function_tools/tools.py b/tests/integration/function_tools/tools.py
new file mode 100644
index 000000000..c9fb84c72
--- /dev/null
+++ b/tests/integration/function_tools/tools.py
@@ -0,0 +1,3 @@
+def add(a: int, b: int) -> int:
+    """Add two numbers."""
+    return a + b
diff --git a/tests/unit/fast_agent/agents/test_tool_agent_add_agent_tool.py b/tests/unit/fast_agent/agents/test_tool_agent_add_agent_tool.py
new file mode 100644
index 000000000..033a596d3
--- /dev/null
+++ b/tests/unit/fast_agent/agents/test_tool_agent_add_agent_tool.py
@@ -0,0 +1,63 @@
+import pytest
+from mcp import Tool
+
+from fast_agent.agents.agent_types import AgentConfig
+from fast_agent.agents.llm_agent import LlmAgent
+from fast_agent.agents.tool_agent import ToolAgent
+from fast_agent.core.prompt import Prompt
+from fast_agent.types import PromptMessageExtended, RequestParams
+
+
+class DummyChild(LlmAgent):
+    def __init__(self, config: AgentConfig) -> None:
+        super().__init__(config=config, context=None)
+        self.spawned: list[DummyChild] = []
+        self.history_loads: list[list[PromptMessageExtended] | None] = []
+        self.generated: list[list[PromptMessageExtended]] = []
+
+    def load_message_history(self, messages: list[PromptMessageExtended] | None) -> None:
+        super().load_message_history(messages)
+        self.history_loads.append(messages)
+
+    async def spawn_detached_instance(self, *, name: str | None = None) -> "DummyChild":
+        clone_config = AgentConfig(
+            name=name or self.config.name,
+            instruction=self.instruction,
+        )
+        clone = DummyChild(clone_config)
+        clone.initialized = True
+        self.spawned.append(clone)
+        return clone
+
+    async def generate_impl(
+        self,
+        messages: list[PromptMessageExtended],
+        request_params: RequestParams | None = None,
+        tools: list[Tool] | None = None,
+    ) -> PromptMessageExtended:
+        self.generated.append(messages)
+        return Prompt.assistant("ok")
+
+
+@pytest.mark.unit
+@pytest.mark.asyncio
+async def test_add_agent_tool_uses_stateless_clone_history() -> None:
+    parent = ToolAgent(AgentConfig("parent"), [])
+    child = DummyChild(AgentConfig("child", instruction="child"))
+    child.load_message_history([Prompt.user("seed")])
+
+    tool_name = parent.add_agent_tool(child)
+    tool = parent._execution_tools[tool_name]
+
+    assert await tool.run({"text": "hello"}) == "ok"
+    assert len(child.spawned) == 1
+    clone_one = child.spawned[0]
+    assert clone_one.history_loads == [[]]
+    assert clone_one.message_history == []
+
+    assert await tool.run({"text": "again"}) == "ok"
+    assert len(child.spawned) == 2
+    clone_two = child.spawned[1]
+    assert clone_two.history_loads == [[]]
+    assert clone_two.message_history == []
+    assert clone_two is not clone_one
diff --git a/tests/unit/fast_agent/mcp/test_ui_mixin.py b/tests/unit/fast_agent/mcp/test_ui_mixin.py
index 9c5b5a453..7646c0a29 100644
--- a/tests/unit/fast_agent/mcp/test_ui_mixin.py
+++ b/tests/unit/fast_agent/mcp/test_ui_mixin.py
@@ -48,6 +48,7 @@ async def show_assistant_message(
         name: str | None = None,
         model: str | None = None,
         additional_message: Text | None = None,
+        render_markdown: bool | None = None,
     ) -> None:
         """Stub implementation with correct signature."""
         pass
diff --git a/tests/unit/fast_agent/ui/test_agent_completer.py b/tests/unit/fast_agent/ui/test_agent_completer.py
index c7589130a..d05f017b0 100644
--- a/tests/unit/fast_agent/ui/test_agent_completer.py
+++ b/tests/unit/fast_agent/ui/test_agent_completer.py
@@ -146,6 +146,51 @@ def test_get_completions_for_load_shortcut():
         os.chdir(original_cwd)
 
 
+def test_complete_agent_card_files_finds_md_and_yaml():
+    """Test that _complete_agent_card_files finds AgentCard files."""
+    with tempfile.TemporaryDirectory() as tmpdir:
+        (Path(tmpdir) / "agent.md").touch()
+        (Path(tmpdir) / "agent.yaml").touch()
+        (Path(tmpdir) / "agent.yml").touch()
+        (Path(tmpdir) / "agent.txt").touch()
+
+        completer = AgentCompleter(agents=["agent1"])
+
+        original_cwd = os.getcwd()
+        try:
+            os.chdir(tmpdir)
+
+            completions = list(completer._complete_agent_card_files(""))
+            names = [c.text for c in completions]
+
+            assert "agent.md" in names
+            assert "agent.yaml" in names
+            assert "agent.yml" in names
+            assert "agent.txt" not in names
+        finally:
+            os.chdir(original_cwd)
+
+
+def test_get_completions_for_card_command():
+    """Test get_completions provides file completions after /card."""
+    with tempfile.TemporaryDirectory() as tmpdir:
+        (Path(tmpdir) / "agent.md").touch()
+
+        completer = AgentCompleter(agents=["agent1"])
+
+        original_cwd = os.getcwd()
+        try:
+            os.chdir(tmpdir)
+
+            doc = Document("/card ", cursor_position=6)
+            completions = list(completer.get_completions(doc, None))
+            names = [c.text for c in completions]
+
+            assert "agent.md" in names
+        finally:
+            os.chdir(original_cwd)
+
+
 def test_get_completions_skips_hidden_files():
     """Test that hidden files are not included in completions."""
     with tempfile.TemporaryDirectory() as tmpdir: