Changes from all commits
15 commits
63d6012 feat: Implement agent components for context management and action ro… (ogabrielluiz, Dec 17, 2024)
4a832e7 fix: Include initial context at the end of the full context output (ogabrielluiz, Dec 17, 2024)
b59f9fe fix: Update action logging in DecideActionComponent to store action log (ogabrielluiz, Dec 17, 2024)
870f050 fix: Handle invalid LLM responses in GenerateThoughtComponent by rais… (ogabrielluiz, Dec 17, 2024)
d77849d fix: Correct output model instantiation in LangflowAgent to ensure pr… (ogabrielluiz, Dec 17, 2024)
5d0f143 test: Add unit tests for ChatInput component to verify attribute inde… (ogabrielluiz, Dec 18, 2024)
5065445 Revert "test: Add unit tests for ChatInput component to verify attrib… (ogabrielluiz, Dec 18, 2024)
772e425 fix typo in max iterations input (ogabrielluiz, Dec 18, 2024)
e1711b0 fix: Improve error messaging in ExecuteActionComponent for better cla… (ogabrielluiz, Dec 18, 2024)
3ce89e8 feat: Refactor GenerateThoughtComponent to enhance context formatting… (ogabrielluiz, Dec 20, 2024)
ca39572 refactor: Simplify AgentActionRouter by removing AgentContext depende… (ogabrielluiz, Dec 20, 2024)
21612f1 fix: Enhance ExecuteActionComponent to improve context handling and e… (ogabrielluiz, Dec 20, 2024)
573df87 refactor: Update DecideActionComponent to enhance context formatting … (ogabrielluiz, Dec 20, 2024)
e81ae10 refactor: Remove AgentContextBuilder and enhance context initializati… (ogabrielluiz, Dec 20, 2024)
197c6af feat: Enhance ProvideFinalAnswerComponent to improve context formatti… (ogabrielluiz, Dec 20, 2024)
6 changes: 5 additions & 1 deletion src/backend/base/langflow/base/agents/context.py
@@ -96,7 +96,8 @@ def _serialize_context_history_tuple(self, context_history_tuple: tuple[str, str]) -> str:
return f"{name}: {value}"

def get_full_context(self) -> str:
context_history_reversed = self.context_history[::-1]
initial_context = self.context_history[0][1]
context_history_reversed = self.context_history[1:][::-1]
context_formatted = "\n".join(
[
self._serialize_context_history_tuple(context_history_tuple)
@@ -106,4 +107,7 @@ def get_full_context(self) -> str:
return f"""
Context:
{context_formatted}

Initial Context:
{initial_context}
"""
83 changes: 83 additions & 0 deletions src/backend/base/langflow/components/agents/agent_action_router.py
@@ -0,0 +1,83 @@
from langchain.schema.agent import AgentFinish

from langflow.custom import Component
from langflow.io import IntInput, Output
from langflow.schema.data import Data
from langflow.schema.message import Message


class AgentActionRouter(Component):
display_name = "Agent Action Router"
description = "Routes the agent's flow based on the last action type."

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__iteration_updated = False

inputs = [
IntInput(name="max_iterations", display_name="Max Iterations", required=True, value=5),
]

outputs = [
Output(name="execute_tool", display_name="Execute Tool", method="route_to_execute_tool", cache=False),
Output(name="final_answer", display_name="Final Answer", method="route_to_final_answer", cache=False),
]

def _pre_run_setup(self):
self.__iteration_updated = False
# Initialize context if not already set
if "iteration" not in self.ctx:
self.update_ctx(
{
"iteration": 0,
"max_iterations": self.max_iterations,
"thought": "",
"last_action": None,
"last_action_result": None,
"final_answer": "",
}
)

def _get_context_message_and_route_to_stop(self) -> tuple[str, str]:
ctx = self.ctx
if isinstance(ctx.get("last_action"), AgentFinish) or ctx.get("iteration", 0) >= ctx.get(
"max_iterations", self.max_iterations
):
return "Provide Final Answer", "execute_tool"
return "Execute Tool", "final_answer"

def iterate_and_stop_once(self, route_to_stop: str):
if not self.__iteration_updated:
current_iteration = self.ctx.get("iteration", 0)
self.update_ctx({"iteration": current_iteration + 1})
self.__iteration_updated = True
self.stop(route_to_stop)

def _create_status_data(self) -> list[Data]:
ctx = self.ctx
return [
Data(
name="Agent State",
value=f"""
Iteration: {ctx.get('iteration', 0)}
Last Action: {ctx.get('last_action')}
Last Result: {ctx.get('last_action_result')}
Thought: {ctx.get('thought', '')}
Final Answer: {ctx.get('final_answer', '')}
""",
)
]

def route_to_execute_tool(self) -> Message:
context_message, route_to_stop = self._get_context_message_and_route_to_stop()
self.update_ctx({"router_decision": context_message})
self.iterate_and_stop_once(route_to_stop)
self.status = self._create_status_data()
return Message(text=context_message, type="routing_decision")

def route_to_final_answer(self) -> Message:
context_message, route_to_stop = self._get_context_message_and_route_to_stop()
self.update_ctx({"router_decision": context_message})
self.iterate_and_stop_once(route_to_stop)
self.status = self._create_status_data()
return Message(text=context_message, type="routing_decision")
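
A sketch of the routing rule above, using a plain dict in place of self.ctx; the AgentFinish import matches the one in this file and assumes langchain is installed, everything else is a stand-in.

from langchain.schema.agent import AgentFinish

def pick_route(ctx: dict, default_max_iterations: int = 5) -> tuple[str, str]:
    # Returns (message passed downstream, name of the output branch to stop).
    finished = isinstance(ctx.get("last_action"), AgentFinish)
    exhausted = ctx.get("iteration", 0) >= ctx.get("max_iterations", default_max_iterations)
    if finished or exhausted:
        # Done: surface the final answer and stop the tool-execution branch.
        return "Provide Final Answer", "execute_tool"
    # Keep looping: run the tool and stop the final-answer branch.
    return "Execute Tool", "final_answer"

print(pick_route({"iteration": 5, "max_iterations": 5}))  # ('Provide Final Answer', 'execute_tool')
print(pick_route({"iteration": 1, "max_iterations": 5}))  # ('Execute Tool', 'final_answer')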
27 changes: 27 additions & 0 deletions src/backend/base/langflow/components/agents/agent_context.py
@@ -0,0 +1,27 @@
from langflow.base.agents.context import AgentContext
from langflow.custom import Component
from langflow.io import HandleInput, IntInput, MessageTextInput, Output


class AgentContextBuilder(Component):
display_name = "Agent Context Builder"
description = "Builds the AgentContext instance for the agent execution loop."

inputs = [
HandleInput(name="tools", display_name="Tools", input_types=["Tool"], is_list=True, required=True),
HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
MessageTextInput(name="initial_context", display_name="Initial Context", required=False),
IntInput(name="max_iterations", display_name="Max Iterations", value=5, required=False),
]

outputs = [Output(name="agent_context", display_name="Agent Context", method="build_context")]

def build_context(self) -> AgentContext:
tools = [self.tools] if self.tools and not isinstance(self.tools, list) else self.tools

tools_dict = {tool.name: tool for tool in tools}
context = AgentContext(tools=tools_dict, llm=self.llm, context=self.initial_context or "", iteration=0)
if self.max_iterations is not None:
context.max_iterations = self.max_iterations
self.status = context.to_data_repr()
return context
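
A small sketch of the normalization done in build_context(): a single tool or a list of tools becomes a name-to-tool mapping, with tool names assumed unique; SimpleNamespace stands in for real Tool objects.

from types import SimpleNamespace

def tools_to_dict(tools):
    # Accept a single tool or a list, as build_context() does.
    if tools and not isinstance(tools, list):
        tools = [tools]
    return {tool.name: tool for tool in tools}

search = SimpleNamespace(name="search", description="Search the web.")
calculator = SimpleNamespace(name="calculator", description="Do arithmetic.")
print(tools_to_dict([search, calculator]).keys())  # dict_keys(['search', 'calculator'])
print(tools_to_dict(search).keys())                # dict_keys(['search'])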
74 changes: 74 additions & 0 deletions src/backend/base/langflow/components/agents/decide_action.py
@@ -0,0 +1,74 @@
from typing import TYPE_CHECKING

from langchain.agents.output_parsers.tools import parse_ai_message_to_tool_action

from langflow.custom import Component
from langflow.io import MessageTextInput, Output
from langflow.schema.data import Data
from langflow.schema.message import Message

if TYPE_CHECKING:
from langchain_core.messages import AIMessage


class DecideActionComponent(Component):
display_name = "Decide Action"
description = "Decides on an action based on the current thought and context."

inputs = [
MessageTextInput(
name="prompt",
display_name="Prompt",
required=True,
value="Based on your thought, decide the best action to take next.",
),
]

outputs = [Output(name="action", display_name="Decided Action", method="decide_action")]

def _format_context(self) -> str:
ctx = self.ctx
context_parts = []

# Add current thought
if ctx.get("thought"):
context_parts.append(f"Current Thought: {ctx['thought']}")

# Add available tools
if "tools" in ctx:
context_parts.append("\nAvailable Tools:")
for tool_name, tool in ctx["tools"].items():
context_parts.append(f"- {tool_name}: {tool.description}")

return "\n".join(context_parts)

def decide_action(self) -> Message:
# Format the full context
full_prompt = f"{self._format_context()}\n{self.prompt}\nAction:"

# Generate action using LLM
response: AIMessage = self.ctx["llm"].invoke(full_prompt)
action = parse_ai_message_to_tool_action(response)

# Handle action result and update context using update_ctx
if isinstance(action, list):
self.update_ctx({"last_action": action[0]})
action = action[0]
else:
self.update_ctx({"last_action": action})

# Create status data
self.status = [
Data(
name="Decided Action",
value=f"""
Context Used:
{self._format_context()}

Decided Action:
{action.log if hasattr(action, 'log') else str(action)}
""",
)
]

return Message(text=str(action))
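
A sketch of how decide_action() assembles its LLM prompt from the shared context; the dict and the SimpleNamespace tool are stand-ins for the real context and Tool objects.

from types import SimpleNamespace

def build_action_prompt(ctx: dict, prompt: str) -> str:
    parts = []
    if ctx.get("thought"):
        parts.append(f"Current Thought: {ctx['thought']}")
    if "tools" in ctx:
        parts.append("\nAvailable Tools:")
        for name, tool in ctx["tools"].items():
            parts.append(f"- {name}: {tool.description}")
    # The component appends its prompt input and an "Action:" cue for the LLM.
    return "\n".join(parts) + f"\n{prompt}\nAction:"

ctx = {
    "thought": "I need the current weather in Paris.",
    "tools": {"search": SimpleNamespace(description="Search the web.")},
}
print(build_action_prompt(ctx, "Based on your thought, decide the best action to take next."))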
57 changes: 57 additions & 0 deletions src/backend/base/langflow/components/agents/execute_action.py
@@ -0,0 +1,57 @@
from typing import TYPE_CHECKING, Any

from langflow.custom import Component
from langflow.io import Output
from langflow.schema.data import Data
from langflow.schema.message import Message

if TYPE_CHECKING:
from langchain_core.agents import AgentAction


class ExecuteActionComponent(Component):
display_name = "Execute Action"
description = "Executes the selected action using available tools."

outputs = [Output(name="action_result", display_name="Action Result", method="execute_action")]

def _format_result(self, result: Any) -> str:
if hasattr(result, "content"):
return result.content
if hasattr(result, "log"):
return result.log
return str(result)

def execute_action(self) -> Message:
# Get the action from context
action: AgentAction = self.ctx.get("last_action")
if not action:
msg = "No action found in context to execute"
raise ValueError(msg)

# Get tools from context
tools = self.ctx.get("tools", {})

# Execute the action using the appropriate tool
if action.tool in tools:
result = tools[action.tool](action.tool_input)
formatted_result = self._format_result(result)
self.update_ctx({"last_action_result": formatted_result})
else:
error_msg = f"Action '{action}' not found in available tools."
formatted_result = f"Error: {error_msg}"
self.update_ctx({"last_action_result": formatted_result})

# Create status data
self.status = [
Data(
name="Action Execution",
value=f"""
Tool: {action.tool}
Input: {action.tool_input}
Result: {formatted_result}
""",
)
]

return Message(text=formatted_result)
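
A sketch of the dispatch in execute_action(): the tool is looked up by name and called with the parsed tool_input, and an unknown tool yields an error string instead of raising; the SimpleNamespace action and the lambda tool are stand-ins.

from types import SimpleNamespace

def run_action(action, tools: dict) -> str:
    if action.tool in tools:
        result = tools[action.tool](action.tool_input)
        # Mirror _format_result(): prefer .content, fall back to str().
        return result.content if hasattr(result, "content") else str(result)
    return f"Error: Action '{action.tool}' not found in available tools."

tools = {"echo": lambda text: f"echo: {text}"}
print(run_action(SimpleNamespace(tool="echo", tool_input="hello"), tools))     # echo: hello
print(run_action(SimpleNamespace(tool="missing", tool_input="hello"), tools))  # Error: ...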
86 changes: 86 additions & 0 deletions src/backend/base/langflow/components/agents/generate_thought.py
@@ -0,0 +1,86 @@
from typing import TYPE_CHECKING

from langchain.agents.output_parsers.tools import parse_ai_message_to_tool_action

from langflow.custom import Component
from langflow.io import MessageTextInput, Output
from langflow.schema.data import Data
from langflow.schema.message import Message

if TYPE_CHECKING:
from langchain_core.messages import AIMessage


class GenerateThoughtComponent(Component):
display_name = "Generate Thought"
description = "Generates a thought based on the current context."

inputs = [
MessageTextInput(
name="prompt",
display_name="Prompt",
required=True,
value="Based on the provided context, generate your next thought.",
),
]

outputs = [Output(name="thought", display_name="Generated Thought", method="generate_thought")]

def _format_context(self) -> str:
ctx = self.ctx
context_parts = []

# Add router decision if exists
if "router_decision" in ctx:
context_parts.append(f"Decision: {ctx['router_decision']}")

# Add thought if exists
if ctx.get("thought"):
context_parts.append(f"Previous Thought: {ctx['thought']}")

# Add last action and result if they exist
if ctx.get("last_action"):
context_parts.append(f"Last Action: {ctx['last_action']}")
if ctx.get("last_action_result"):
context_parts.append(f"Action Result: {ctx['last_action_result']}")

# Add iteration info
context_parts.append(f"Current Iteration: {ctx.get('iteration', 0)}/{ctx.get('max_iterations', 5)}")

return "\n".join(context_parts)

def generate_thought(self) -> Message:
# Format the full context
full_prompt = f"{self._format_context()}\n{self.prompt}\nThought:"

# Generate thought using LLM
thought: AIMessage = self.ctx["llm"].invoke(full_prompt)

if not thought.content:
action = parse_ai_message_to_tool_action(thought)
if action:
msg = (
"Invalid LLM response: An action was returned but no thought was generated. "
"The LLM should first generate a thought explaining its reasoning before taking any action. "
"Please check the prompt and LLM configuration. Maybe use a better model."
)
raise ValueError(msg)

# Update context with new thought using update_ctx
self.update_ctx({"thought": thought.content})

# Create status data
self.status = [
Data(
name="Generated Thought",
value=f"""
Context Used:
{self._format_context()}

New Thought:
{thought.content}
""",
)
]

return Message(text=thought.content)
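
A sketch of the guard in generate_thought(): a reply whose content is empty (for example, the model skipped straight to a tool call) is rejected, because the loop expects a textual thought before any action; SimpleNamespace stands in for an AIMessage.

from types import SimpleNamespace

def extract_thought(reply) -> str:
    if not reply.content:
        # The real component first checks whether the empty reply actually
        # contains a tool action and only then raises; this sketch simplifies
        # by rejecting any empty reply.
        msg = (
            "Invalid LLM response: an action was returned but no thought was generated. "
            "Check the prompt and LLM configuration."
        )
        raise ValueError(msg)
    return reply.content

print(extract_thought(SimpleNamespace(content="I should search for the forecast first.")))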