11 changes: 11 additions & 0 deletions agents/generative-interface/Dockerfile
@@ -0,0 +1,11 @@
FROM python:3.13-alpine3.22
ARG RELEASE_VERSION="main"
COPY ./agents/generative-interface/ /app/agents/generative-interface
COPY ./apps/agentstack-sdk-py/ /app/apps/agentstack-sdk-py/
WORKDIR /app/agents/generative-interface
RUN --mount=type=cache,target=/tmp/.cache/uv \
    --mount=type=bind,from=ghcr.io/astral-sh/uv:0.9.5,source=/uv,target=/bin/uv \
    UV_COMPILE_BYTECODE=1 HOME=/tmp uv sync
ENV PRODUCTION_MODE=True \
    RELEASE_VERSION=${RELEASE_VERSION}
CMD ["/app/agents/generative-interface/.venv/bin/server"]
36 changes: 36 additions & 0 deletions agents/generative-interface/pyproject.toml
@@ -0,0 +1,36 @@
[project]
name = "generative-interface"
version = "0.1.0"
description = "Agent demonstrating generative interface extension."
authors = [
    { name = "IBM Corp." },
]
requires-python = ">=3.13,<3.14"
dependencies = [
    "agentstack-sdk",
    "openinference-instrumentation-beeai>=0.1.14",
    "pydantic-settings>=2.9.0",
    "uvicorn>=0.35.0",
]

[tool.ruff]
line-length = 120

[tool.uv.sources]
agentstack-sdk = { path = "../../apps/agentstack-sdk-py", editable = true }

[project.scripts]
server = "generative_interface.agent:serve"

[build-system]
requires = ["uv_build>=0.9.0,<0.10.0"]
build-backend = "uv_build"

[dependency-groups]
dev = [
    "watchfiles>=1.1.0",
]

[tool.pyright]
venvPath = "."
venv = ".venv"
@@ -0,0 +1,6 @@
# Copyright 2025 © BeeAI a Series of LF Projects, LLC
# SPDX-License-Identifier: Apache-2.0

from .agent import serve

__all__ = ["serve"]
136 changes: 136 additions & 0 deletions agents/generative-interface/src/generative_interface/agent.py
@@ -0,0 +1,136 @@
# Copyright 2025 © BeeAI a Series of LF Projects, LLC
# SPDX-License-Identifier: Apache-2.0

import json
import os
from typing import Annotated

from a2a.types import Message, TextPart
from agentstack_sdk.a2a.extensions import (
    AgentDetail,
    AgentDetailContributor,
    LLMServiceExtensionServer,
    LLMServiceExtensionSpec,
)
from agentstack_sdk.a2a.extensions.services.generative_interface import (
    GenerativeInterfaceExtensionServer,
    GenerativeInterfaceExtensionSpec,
    GenerativeInterfaceSpec,
)
from agentstack_sdk.server import Server
from agentstack_sdk.server.context import RunContext
from agentstack_sdk.server.middleware.platform_auth_backend import PlatformAuthBackend
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam

server = Server()

AGENT_GOAL = """You are a fortune teller. First ask the user whether they agree to know their fortune; if they do not, reject the request. If they agree, tell them something funny."""


def _get_text(message: Message) -> str:
    return "\n\n".join(part.root.text for part in message.parts or [] if isinstance(part.root, TextPart))


@server.agent(
    name="Generative Interface Agent",
    documentation_url=f"https://github.com/i-am-bee/agentstack/blob/{os.getenv('RELEASE_VERSION', 'main')}/agents/generative-interface",
    version="1.0.0",
    default_input_modes=["text", "text/plain"],
    default_output_modes=["text", "text/plain"],
    description="Fortune teller with dynamic UI generation",
    detail=AgentDetail(
        interaction_mode="multi-turn",
        author=AgentDetailContributor(name="IBM"),
    ),
)
async def agent(
    message: Message,
    context: RunContext,
    ui: Annotated[GenerativeInterfaceExtensionServer, GenerativeInterfaceExtensionSpec.demand()],
    llm: Annotated[LLMServiceExtensionServer, LLMServiceExtensionSpec.single_demand()],
):
    await context.store(message)

    (llm_config,) = llm.data.llm_fulfillments.values()
    client = AsyncOpenAI(
        api_key=llm_config.api_key,
        base_url=llm_config.api_base,
    )

    system_prompt = f"""{ui.catalog_prompt}

{AGENT_GOAL}
"""

    history = context.load_history()
    llm_messages: list[ChatCompletionMessageParam] = [{"role": "system", "content": system_prompt}]

    async for item in history:
        if isinstance(item, Message):
            if content := _get_text(item):
                role = "assistant" if item.role == "agent" else "user"
                llm_messages.append({"role": role, "content": content})

    while True:
        response = await client.chat.completions.create(
            model=llm_config.api_model,
            messages=llm_messages,
        )

        assistant_content = response.choices[0].message.content or ""
        llm_messages.append({"role": "assistant", "content": assistant_content})

        generative_ui = parse_generative_ui(assistant_content)

        if not generative_ui:
            yield assistant_content
            break

        ui_response = await ui.request_ui(spec=generative_ui)
        if not ui_response:
            break

        user_response = f"User selected: {ui_response.component_id}"
        llm_messages.append({"role": "user", "content": user_response})


def parse_generative_ui(content: str) -> GenerativeInterfaceSpec | None:
    spec: dict = {"root": "", "elements": {}}

    for line in content.strip().split("\n"):
        line = line.strip()
        if not line:
            continue
        try:
            patch = json.loads(line)
            if patch.get("op") == "set":
                path = patch.get("path", "")
                value = patch.get("value")
                if path == "/root":
                    spec["root"] = value
                elif path.startswith("/elements/"):
                    key = path[len("/elements/"):]
                    spec["elements"][key] = value
        except json.JSONDecodeError:
            continue
Comment on lines +115 to +116 (Contributor, severity: medium)

The try...except json.JSONDecodeError: continue block silently ignores any lines from the LLM output that are not valid JSON. While this makes the parsing robust, it can make debugging difficult if the LLM consistently produces malformed output. Consider adding a log statement within the except block to record these errors for easier debugging.

Suggested change:
-        except json.JSONDecodeError:
-            continue
+        except json.JSONDecodeError:
+            # Consider logging this error for easier debugging of LLM output.
+            continue
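A minimal sketch of how that suggestion could look with an actual log call, assuming the standard library logging module; the logger and helper name below are hypothetical and not part of this PR:

import json
import logging

logger = logging.getLogger(__name__)  # hypothetical module-level logger


def parse_json_lines(content: str) -> list[dict]:
    """Sketch of the parse loop with the reviewer's suggested logging applied."""
    patches: list[dict] = []
    for line in content.strip().split("\n"):
        line = line.strip()
        if not line:
            continue
        try:
            patches.append(json.loads(line))
        except json.JSONDecodeError as exc:
            # Record the malformed line instead of dropping it silently.
            logger.debug("Skipping non-JSON line from LLM output %r: %s", line, exc)
    return patches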


    if spec["root"] and spec["elements"]:
        return GenerativeInterfaceSpec.model_validate(spec)
    return None


def serve():
    try:
        server.run(
            host=os.getenv("HOST", "127.0.0.1"),
            port=int(os.getenv("PORT", 10001)),
            configure_telemetry=True,
            auth_backend=PlatformAuthBackend(),
        )
    except KeyboardInterrupt:
        pass


if __name__ == "__main__":
    serve()