Skip to content

Commit 14cf310

Browse files
authored
Fix/types (#570)
* typesafe * imports * typesafe * refactor bedrock, pyproject * type safety * type safety, stream fixes, glm-4.7 * types and tests * lint * type safety, streaming fix * type saftey * type checks * add ty to ci * typecheck * add ty, typecheck, diff for toad * fix typecheck script * updates to streaming, diff, default model for gpt-oss-120b * text changes * ty lockfile * diffs, types
1 parent 695d1f5 commit 14cf310

File tree

127 files changed

+4292
-1804
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

127 files changed

+4292
-1804
lines changed

.github/workflows/checks.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,9 @@ jobs:
4848
- name: Run pyright
4949
run: uv run scripts/lint.py
5050

51+
- name: Run ty
52+
run: uv run scripts/typecheck.py
53+
5154
test:
5255
runs-on: ubuntu-latest
5356
steps:

.pre-commit-config.yaml

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,3 +8,10 @@ repos:
88
args: [--fix]
99
# Run the formatter.
1010
- id: ruff-format
11+
- repo: local
12+
hooks:
13+
- id: ty
14+
name: ty check
15+
entry: uv run scripts/typecheck.py
16+
language: system
17+
pass_filenames: false

examples/tool-use-agent/agent.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,9 @@ def __init__(
2626
@fast.custom(CustomToolAgent)
2727
async def main() -> None:
2828
async with fast.run() as agent:
29-
await agent.default.generate("What is the topic of the video call no.1234?")
29+
await agent.default.generate(
30+
"What is the topic of the video call no.1234?",
31+
)
3032

3133

3234
if __name__ == "__main__":

publish/hf-inference-acp/src/hf_inference_acp/agents.py

Lines changed: 89 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -64,44 +64,49 @@ def _normalize_hf_model(model: str) -> str:
6464
return model
6565

6666

67-
async def _lookup_and_format_providers(model: str) -> str | None:
68-
"""Look up inference providers for a model and return a formatted message.
67+
def _resolve_alias_display(model: str) -> tuple[str, str] | None:
68+
"""Resolve alias to full model string for display, preserving suffix overrides."""
69+
from fast_agent.llm.model_factory import ModelFactory
6970

70-
Returns None if the model is not a HuggingFace model (no '/').
71-
"""
72-
import random
71+
if not model:
72+
return None
7373

74-
# Extract the HF model ID from various formats
75-
model_id = model
74+
alias_key = model
75+
alias_suffix: str | None = None
76+
if ":" in model:
77+
alias_key, alias_suffix = model.rsplit(":", 1)
7678

77-
# Strip hf. prefix if present
78-
if model_id.startswith("hf."):
79-
model_id = model_id[3:]
79+
alias_target = ModelFactory.MODEL_ALIASES.get(alias_key)
80+
if not alias_target:
81+
return None
8082

81-
# Strip :provider suffix if present
82-
if ":" in model_id:
83-
model_id = model_id.rsplit(":", 1)[0]
83+
resolved = alias_target
84+
if alias_suffix:
85+
if ":" in resolved:
86+
resolved = resolved.rsplit(":", 1)[0]
87+
resolved = f"{resolved}:{alias_suffix}"
8488

85-
# Must have org/model format
86-
if "/" not in model_id:
87-
return None
89+
return model, resolved
8890

89-
from fast_agent.llm.hf_inference_lookup import lookup_inference_providers
91+
92+
async def _lookup_and_format_providers(model: str) -> str | None:
93+
"""Look up inference providers for a model and return a formatted message.
94+
95+
Returns None if the model is not a HuggingFace model (no '/').
96+
"""
97+
from fast_agent.llm.hf_inference_lookup import (
98+
format_provider_help_message,
99+
lookup_inference_providers,
100+
normalize_hf_model_id,
101+
)
102+
103+
model_id = normalize_hf_model_id(model)
104+
if model_id is None:
105+
return None
90106

91107
try:
92108
result = await lookup_inference_providers(model_id)
93-
if result.has_providers:
94-
providers = result.format_provider_list()
95-
model_strings = result.format_model_strings()
96-
example = random.choice(model_strings)
97-
return (
98-
f"**Available providers:** {providers}\n\n"
99-
f"**Autoroutes if no provider specified. Example use:** `/set-model {example}`"
100-
)
101-
elif result.exists:
102-
return "No inference providers currently available for this model."
103-
else:
104-
return None
109+
return format_provider_help_message(result)
105110
except Exception:
106111
return None
107112

@@ -131,8 +136,9 @@ async def attach_llm(self, llm_factory, model=None, request_params=None, **kwarg
131136
llm = await super().attach_llm(llm_factory, model, request_params, **kwargs)
132137

133138
# Set up wizard callback if LLM supports it
134-
if hasattr(llm, "set_completion_callback"):
135-
llm.set_completion_callback(self._on_wizard_complete)
139+
callback_setter = getattr(llm, "set_completion_callback", None)
140+
if callback_setter is not None:
141+
callback_setter(self._on_wizard_complete)
136142

137143
return llm
138144

@@ -220,32 +226,45 @@ def acp_session_commands_allowlist(self) -> set[str]:
220226

221227
async def _handle_set_model(self, arguments: str) -> str:
222228
"""Handler for /set-model command."""
229+
from fast_agent.llm.hf_inference_lookup import validate_hf_model
223230
from fast_agent.llm.model_factory import ModelFactory
224231

225-
model = arguments.strip()
232+
raw_model = arguments.strip()
233+
model = raw_model
226234
if not model:
227235
return format_model_list_help()
228236

237+
alias_info = _resolve_alias_display(raw_model)
238+
229239
# Normalize the model string (auto-add hf. prefix if needed)
230240
model = _normalize_hf_model(model)
231241

232-
# Validate the model string before saving to config
242+
# Validate the model string format
233243
try:
234244
ModelFactory.parse_model_string(model)
235245
except Exception as e:
236246
return f"Error: Invalid model `{model}` - {e}"
237247

238-
# Look up inference providers for this model
239-
provider_info = await _lookup_and_format_providers(model)
248+
# Validate model exists on HuggingFace and has providers
249+
validation = await validate_hf_model(model, aliases=ModelFactory.MODEL_ALIASES)
250+
if not validation.valid:
251+
return validation.error or "Error: Model validation failed"
240252

241253
try:
242254
update_model_in_config(model)
243255
applied = await self._apply_model_to_running_hf_agent(model)
244256
applied_note = "\n\nApplied to the running Hugging Face agent." if applied else ""
245-
provider_prefix = f"{provider_info}\n\n" if provider_info else ""
257+
provider_prefix = (
258+
f"{validation.display_message}\n\n" if validation.display_message else ""
259+
)
260+
if alias_info:
261+
alias_display, resolved_alias = alias_info
262+
model_status = f"Active model set to: `{alias_display}` (`{resolved_alias}`)"
263+
else:
264+
model_status = f"Default model set to: `{model}`"
246265
return (
247266
f"{provider_prefix}"
248-
f"Default model set to: `{model}`\n\nConfig file updated: `{CONFIG_FILE}`"
267+
f"{model_status}\n\nConfig file updated: `{CONFIG_FILE}`"
249268
f"{applied_note}"
250269
)
251270
except Exception as e:
@@ -311,32 +330,19 @@ async def _get_model_provider_info(self, model: str) -> str | None:
311330
312331
Returns None if providers cannot be looked up or model is not a HF model.
313332
"""
314-
from fast_agent.llm.hf_inference_lookup import lookup_inference_providers
315-
316-
# Extract the HF model ID from various formats
317-
model_id = model
318-
319-
# Strip hf. prefix if present
320-
if model_id.startswith("hf."):
321-
model_id = model_id[3:]
322-
323-
# Strip :provider suffix if present
324-
if ":" in model_id:
325-
model_id = model_id.rsplit(":", 1)[0]
333+
from fast_agent.llm.hf_inference_lookup import (
334+
format_provider_summary,
335+
lookup_inference_providers,
336+
normalize_hf_model_id,
337+
)
326338

327-
# Must have org/model format
328-
if "/" not in model_id:
339+
model_id = normalize_hf_model_id(model)
340+
if model_id is None:
329341
return None
330342

331343
try:
332344
result = await lookup_inference_providers(model_id)
333-
if result.has_providers:
334-
providers = result.format_provider_list()
335-
return f"Available providers: {providers}"
336-
elif result.exists:
337-
return "No inference providers available"
338-
else:
339-
return None
345+
return format_provider_summary(result)
340346
except Exception:
341347
return None
342348

@@ -498,29 +504,25 @@ async def _send_connect_update(
498504
await _send_connect_update(title="Connected", status="in_progress")
499505

500506
# Rebuild system prompt to include fresh server instructions
501-
await _send_connect_update(
502-
title="Rebuilding system prompt…", status="in_progress"
503-
)
504-
await self.rebuild_instruction_templates()
507+
await _send_connect_update(title="Rebuilding system prompt…", status="in_progress")
508+
await self._apply_instruction_templates()
505509

506510
# Get available tools
507511
await _send_connect_update(title="Fetching available tools…", status="in_progress")
508512
tools_result = await self._aggregator.list_tools()
509513
tool_names = [t.name for t in tools_result.tools] if tools_result.tools else []
510514

515+
# Send final progress update (but don't mark as completed yet -
516+
# the return value serves as the completion signal)
511517
if tool_names:
512-
preview = ", ".join(tool_names[:10])
513-
suffix = f" (+{len(tool_names) - 10} more)" if len(tool_names) > 10 else ""
514518
await _send_connect_update(
515-
title="Connected (tools available)",
519+
title=f"Connected ({len(tool_names)} tools)",
516520
status="completed",
517-
message=f"Available tools: {preview}{suffix}",
518521
)
519522
else:
520523
await _send_connect_update(
521-
title="Connected (no tools found)",
524+
title="Connected (no tools)",
522525
status="completed",
523-
message="No tools available from the server.",
524526
)
525527

526528
if tool_names:
@@ -543,30 +545,43 @@ async def _send_connect_update(
543545

544546
async def _handle_set_model(self, arguments: str) -> str:
545547
"""Handler for /set-model in Hugging Face mode."""
548+
from fast_agent.llm.hf_inference_lookup import validate_hf_model
546549
from fast_agent.llm.model_factory import ModelFactory
547550

548-
model = arguments.strip()
551+
raw_model = arguments.strip()
552+
model = raw_model
549553
if not model:
550554
return format_model_list_help()
551555

556+
alias_info = _resolve_alias_display(raw_model)
557+
552558
# Normalize the model string (auto-add hf. prefix if needed)
553559
model = _normalize_hf_model(model)
554560

555-
# Validate the model string before applying
561+
# Validate the model string format
556562
try:
557563
ModelFactory.parse_model_string(model)
558564
except Exception as e:
559565
return f"Error: Invalid model `{model}` - {e}"
560566

561-
# Look up inference providers for this model
562-
provider_info = await _lookup_and_format_providers(model)
567+
# Validate model exists on HuggingFace and has providers
568+
validation = await validate_hf_model(model, aliases=ModelFactory.MODEL_ALIASES)
569+
if not validation.valid:
570+
return validation.error or "Error: Model validation failed"
563571

564572
try:
565573
# Apply model first - if this fails, don't update config
566574
await self.apply_model(model)
567575
update_model_in_config(model)
568-
provider_prefix = f"{provider_info}\n\n" if provider_info else ""
569-
return f"{provider_prefix}Active model set to: `{model}`\n\nConfig file updated: `{CONFIG_FILE}`"
576+
provider_prefix = (
577+
f"{validation.display_message}\n\n" if validation.display_message else ""
578+
)
579+
if alias_info:
580+
alias_display, resolved_alias = alias_info
581+
model_status = f"Active model set to: `{alias_display}` (`{resolved_alias}`)"
582+
else:
583+
model_status = f"Active model set to: `{model}`"
584+
return f"{provider_prefix}{model_status}\n\nConfig file updated: `{CONFIG_FILE}`"
570585
except Exception as e:
571586
return f"Error setting model: {e}"
572587

publish/hf-inference-acp/src/hf_inference_acp/cli.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
import shlex
1212
import sys
1313
from pathlib import Path # noqa: TC003 - typer needs runtime access
14+
from typing import Any, cast
1415

1516
import typer
1617

@@ -220,7 +221,7 @@ async def run_agents(
220221
if skills_directory is not None:
221222
fast_kwargs["skills_directory"] = skills_directory
222223

223-
fast = FastAgent(**fast_kwargs)
224+
fast = FastAgent(**cast("Any", fast_kwargs))
224225

225226
if shell_runtime:
226227
await fast.app.initialize()

publish/hf-inference-acp/src/hf_inference_acp/hf_config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ def discover_hf_token(*, ignore_env: bool = False) -> tuple[str | None, str | No
5454
from huggingface_hub import get_token
5555

5656
token = get_token()
57-
return token, "huggingface_hub" if token else (None, None)
57+
return (token, "huggingface_hub") if token else (None, None)
5858
except ImportError:
5959
pass
6060

publish/hf-inference-acp/src/hf_inference_acp/wizard/model_catalog.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,8 @@ def _get_model_string(alias: str) -> str:
4040
),
4141
CuratedModel(
4242
id="glm",
43-
display_name="GLM 4.6",
44-
description="ZAI GLM-4.6: Advanced Agentic, Reasoning and Coding Capabilities",
43+
display_name="GLM 4.7",
44+
description="ZAI GLM-4.7: Superior Agentic, Reasoning and Coding Capabilities",
4545
),
4646
CuratedModel(
4747
id="minimax",

publish/hf-inference-acp/src/hf_inference_acp/wizard/wizard_llm.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -414,6 +414,8 @@ async def _handle_confirm(self, user_input: str) -> str:
414414
elif cmd in ("y", "yes", "confirm", "ok", "save"):
415415
# Save configuration
416416
try:
417+
if self._state.selected_model is None:
418+
return "No model selected. Please select a model first."
417419
update_model_in_config(self._state.selected_model)
418420
update_mcp_server_load_on_start("huggingface", self._state.mcp_load_on_start)
419421
self._state.stage = WizardStage.COMPLETE

pyproject.toml

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -64,16 +64,6 @@ all-providers = [
6464
"boto3>=1.35.0",
6565
"tensorzero>=2025.7.5"
6666
]
67-
dev = [
68-
"pre-commit>=4.0.1",
69-
"pydantic>=2.10.4",
70-
"pyyaml>=6.0.2",
71-
"ruff>=0.8.4",
72-
"pytest>=7.4.0",
73-
"pytest-asyncio>=0.21.1",
74-
"pytest-cov",
75-
"ruamel.yaml>=0.18.0",
76-
]
7767

7868
[build-system]
7969
requires = ["hatchling"]
@@ -117,11 +107,13 @@ testpaths = ["tests"]
117107

118108
[dependency-groups]
119109
dev = [
110+
"boto3>=1.35.0",
120111
"pre-commit>=4.0.1",
121112
"pydantic>=2.10.4",
122113
"ruamel.yaml>=0.18.0",
123114
"pyyaml>=6.0.2",
124115
"ruff>=0.8.4",
116+
"ty>=0.0.5",
125117
"pytest>=7.4.0",
126118
"pytest-asyncio>=0.21.1",
127119
"pytest-cov>=6.1.1",

0 commit comments

Comments
 (0)