diff --git a/src/backend/base/langflow/base/agents/context.py b/src/backend/base/langflow/base/agents/context.py index 8e4961ecc579..7a4da8c1f759 100644 --- a/src/backend/base/langflow/base/agents/context.py +++ b/src/backend/base/langflow/base/agents/context.py @@ -96,7 +96,8 @@ def _serialize_context_history_tuple(self, context_history_tuple: tuple[str, str return f"{name}: {value}" def get_full_context(self) -> str: - context_history_reversed = self.context_history[::-1] + initial_context = self.context_history[0][1] if self.context_history else "" + context_history_reversed = self.context_history[1:][::-1] context_formatted = "\n".join( [ self._serialize_context_history_tuple(context_history_tuple) @@ -106,4 +107,7 @@ def get_full_context(self) -> str: return f""" Context: {context_formatted} + +Initial Context: +{initial_context} """ diff --git a/src/backend/base/langflow/components/agents/agent_action_router.py b/src/backend/base/langflow/components/agents/agent_action_router.py new file mode 100644 index 000000000000..f5910aca1964 --- /dev/null +++ b/src/backend/base/langflow/components/agents/agent_action_router.py @@ -0,0 +1,83 @@ +from langchain.schema.agent import AgentFinish + +from langflow.custom import Component +from langflow.io import IntInput, Output +from langflow.schema.data import Data +from langflow.schema.message import Message + + +class AgentActionRouter(Component): + display_name = "Agent Action Router" + description = "Routes the agent's flow based on the last action type." 
+ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.__iteration_updated = False + + inputs = [ + IntInput(name="max_iterations", display_name="Max Iterations", required=True, value=5), + ] + + outputs = [ + Output(name="execute_tool", display_name="Execute Tool", method="route_to_execute_tool", cache=False), + Output(name="final_answer", display_name="Final Answer", method="route_to_final_answer", cache=False), + ] + + def _pre_run_setup(self): + self.__iteration_updated = False + # Initialize context if not already set + if "iteration" not in self.ctx: + self.update_ctx( + { + "iteration": 0, + "max_iterations": self.max_iterations, + "thought": "", + "last_action": None, + "last_action_result": None, + "final_answer": "", + } + ) + + def _get_context_message_and_route_to_stop(self) -> tuple[str, str]: + ctx = self.ctx + if isinstance(ctx.get("last_action"), AgentFinish) or ctx.get("iteration", 0) >= ctx.get( + "max_iterations", self.max_iterations + ): + return "Provide Final Answer", "execute_tool" + return "Execute Tool", "final_answer" + + def iterate_and_stop_once(self, route_to_stop: str): + if not self.__iteration_updated: + current_iteration = self.ctx.get("iteration", 0) + self.update_ctx({"iteration": current_iteration + 1}) + self.__iteration_updated = True + self.stop(route_to_stop) + + def _create_status_data(self) -> list[Data]: + ctx = self.ctx + return [ + Data( + name="Agent State", + value=f""" +Iteration: {ctx.get('iteration', 0)} +Last Action: {ctx.get('last_action')} +Last Result: {ctx.get('last_action_result')} +Thought: {ctx.get('thought', '')} +Final Answer: {ctx.get('final_answer', '')} +""", + ) + ] + + def route_to_execute_tool(self) -> Message: + context_message, route_to_stop = self._get_context_message_and_route_to_stop() + self.update_ctx({"router_decision": context_message}) + self.iterate_and_stop_once(route_to_stop) + self.status = self._create_status_data() + return Message(text=context_message, 
type="routing_decision") + + def route_to_final_answer(self) -> Message: + context_message, route_to_stop = self._get_context_message_and_route_to_stop() + self.update_ctx({"router_decision": context_message}) + self.iterate_and_stop_once(route_to_stop) + self.status = self._create_status_data() + return Message(text=context_message, type="routing_decision") diff --git a/src/backend/base/langflow/components/agents/agent_context.py b/src/backend/base/langflow/components/agents/agent_context.py new file mode 100644 index 000000000000..0ec44fe27a8b --- /dev/null +++ b/src/backend/base/langflow/components/agents/agent_context.py @@ -0,0 +1,27 @@ +from langflow.base.agents.context import AgentContext +from langflow.custom import Component +from langflow.io import HandleInput, IntInput, MessageTextInput, Output + + +class AgentContextBuilder(Component): + display_name = "Agent Context Builder" + description = "Builds the AgentContext instance for the agent execution loop." + + inputs = [ + HandleInput(name="tools", display_name="Tools", input_types=["Tool"], is_list=True, required=True), + HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True), + MessageTextInput(name="initial_context", display_name="Initial Context", required=False), + IntInput(name="max_iterations", display_name="Max Iterations", value=5, required=False), + ] + + outputs = [Output(name="agent_context", display_name="Agent Context", method="build_context")] + + def build_context(self) -> AgentContext: + tools = [self.tools] if self.tools and not isinstance(self.tools, list) else self.tools + + tools_dict = {tool.name: tool for tool in tools} + context = AgentContext(tools=tools_dict, llm=self.llm, context=self.initial_context or "", iteration=0) + if self.max_iterations is not None: + context.max_iterations = self.max_iterations + self.status = context.to_data_repr() + return context diff --git a/src/backend/base/langflow/components/agents/decide_action.py 
b/src/backend/base/langflow/components/agents/decide_action.py new file mode 100644 index 000000000000..7fbf3a3015f2 --- /dev/null +++ b/src/backend/base/langflow/components/agents/decide_action.py @@ -0,0 +1,74 @@ +from typing import TYPE_CHECKING + +from langchain.agents.output_parsers.tools import parse_ai_message_to_tool_action + +from langflow.custom import Component +from langflow.io import MessageTextInput, Output +from langflow.schema.data import Data +from langflow.schema.message import Message + +if TYPE_CHECKING: + from langchain_core.messages import AIMessage + + +class DecideActionComponent(Component): + display_name = "Decide Action" + description = "Decides on an action based on the current thought and context." + + inputs = [ + MessageTextInput( + name="prompt", + display_name="Prompt", + required=True, + value="Based on your thought, decide the best action to take next.", + ), + ] + + outputs = [Output(name="action", display_name="Decided Action", method="decide_action")] + + def _format_context(self) -> str: + ctx = self.ctx + context_parts = [] + + # Add current thought + if ctx.get("thought"): + context_parts.append(f"Current Thought: {ctx['thought']}") + + # Add available tools + if "tools" in ctx: + context_parts.append("\nAvailable Tools:") + for tool_name, tool in ctx["tools"].items(): + context_parts.append(f"- {tool_name}: {tool.description}") + + return "\n".join(context_parts) + + def decide_action(self) -> Message: + # Format the full context + full_prompt = f"{self._format_context()}\n{self.prompt}\nAction:" + + # Generate action using LLM + response: AIMessage = self.ctx["llm"].invoke(full_prompt) + action = parse_ai_message_to_tool_action(response) + + # Handle action result and update context using update_ctx + if isinstance(action, list): + self.update_ctx({"last_action": action[0]}) + action = action[0] + else: + self.update_ctx({"last_action": action}) + + # Create status data + self.status = [ + Data( + name="Decided Action", + 
value=f""" +Context Used: +{self._format_context()} + +Decided Action: +{action.log if hasattr(action, 'log') else str(action)} +""", + ) + ] + + return Message(text=str(action)) diff --git a/src/backend/base/langflow/components/agents/execute_action.py b/src/backend/base/langflow/components/agents/execute_action.py new file mode 100644 index 000000000000..affc72d380f5 --- /dev/null +++ b/src/backend/base/langflow/components/agents/execute_action.py @@ -0,0 +1,57 @@ +from typing import TYPE_CHECKING, Any + +from langflow.custom import Component +from langflow.io import Output +from langflow.schema.data import Data +from langflow.schema.message import Message + +if TYPE_CHECKING: + from langchain_core.agents import AgentAction + + +class ExecuteActionComponent(Component): + display_name = "Execute Action" + description = "Executes the selected action using available tools." + + outputs = [Output(name="action_result", display_name="Action Result", method="execute_action")] + + def _format_result(self, result: Any) -> str: + if hasattr(result, "content"): + return result.content + if hasattr(result, "log"): + return result.log + return str(result) + + def execute_action(self) -> Message: + # Get the action from context + action: AgentAction = self.ctx.get("last_action") + if not action: + msg = "No action found in context to execute" + raise ValueError(msg) + + # Get tools from context + tools = self.ctx.get("tools", {}) + + # Execute the action using the appropriate tool + if action.tool in tools: + result = tools[action.tool](action.tool_input) + formatted_result = self._format_result(result) + self.update_ctx({"last_action_result": formatted_result}) + else: + error_msg = f"Action '{action.tool}' not found in available tools." 
+ formatted_result = f"Error: {error_msg}" + self.update_ctx({"last_action_result": formatted_result}) + + # Create status data + self.status = [ + Data( + name="Action Execution", + value=f""" +Tool: {action.tool} +Input: {action.tool_input} +Result: {formatted_result} +""", + ) + ] + + return Message(text=formatted_result) diff --git a/src/backend/base/langflow/components/agents/generate_thought.py b/src/backend/base/langflow/components/agents/generate_thought.py new file mode 100644 index 000000000000..fd59831fc73e --- /dev/null +++ b/src/backend/base/langflow/components/agents/generate_thought.py @@ -0,0 +1,86 @@ +from typing import TYPE_CHECKING + +from langchain.agents.output_parsers.tools import parse_ai_message_to_tool_action + +from langflow.custom import Component +from langflow.io import MessageTextInput, Output +from langflow.schema.data import Data +from langflow.schema.message import Message + +if TYPE_CHECKING: + from langchain_core.messages import AIMessage + + +class GenerateThoughtComponent(Component): + display_name = "Generate Thought" + description = "Generates a thought based on the current context." 
+ + inputs = [ + MessageTextInput( + name="prompt", + display_name="Prompt", + required=True, + value="Based on the provided context, generate your next thought.", + ), + ] + + outputs = [Output(name="thought", display_name="Generated Thought", method="generate_thought")] + + def _format_context(self) -> str: + ctx = self.ctx + context_parts = [] + + # Add router decision if exists + if "router_decision" in ctx: + context_parts.append(f"Decision: {ctx['router_decision']}") + + # Add thought if exists + if ctx.get("thought"): + context_parts.append(f"Previous Thought: {ctx['thought']}") + + # Add last action and result if they exist + if ctx.get("last_action"): + context_parts.append(f"Last Action: {ctx['last_action']}") + if ctx.get("last_action_result"): + context_parts.append(f"Action Result: {ctx['last_action_result']}") + + # Add iteration info + context_parts.append(f"Current Iteration: {ctx.get('iteration', 0)}/{ctx.get('max_iterations', 5)}") + + return "\n".join(context_parts) + + def generate_thought(self) -> Message: + # Format the full context + full_prompt = f"{self._format_context()}\n{self.prompt}\nThought:" + + # Generate thought using LLM + thought: AIMessage = self.ctx["llm"].invoke(full_prompt) + + if not thought.content: + action = parse_ai_message_to_tool_action(thought) + if action: + msg = ( + "Invalid LLM response: An action was returned but no thought was generated. " + "The LLM should first generate a thought explaining its reasoning before taking any action. " + "Please check the prompt and LLM configuration. Maybe use a better model." 
+ ) + raise ValueError(msg) + + # Update context with new thought using update_ctx + self.update_ctx({"thought": thought.content}) + + # Create status data + self.status = [ + Data( + name="Generated Thought", + value=f""" +Context Used: +{self._format_context()} + +New Thought: +{thought.content} +""", + ) + ] + + return Message(text=thought.content) diff --git a/src/backend/base/langflow/components/agents/new_agent.py b/src/backend/base/langflow/components/agents/new_agent.py new file mode 100644 index 000000000000..a46260f2d155 --- /dev/null +++ b/src/backend/base/langflow/components/agents/new_agent.py @@ -0,0 +1,111 @@ +from loguru import logger + +from langflow.components.agents.agent_action_router import AgentActionRouter +from langflow.components.agents.decide_action import DecideActionComponent +from langflow.components.agents.execute_action import ExecuteActionComponent +from langflow.components.agents.generate_thought import GenerateThoughtComponent +from langflow.components.agents.write_final_answer import ProvideFinalAnswerComponent +from langflow.components.inputs.chat import ChatInput +from langflow.components.outputs import ChatOutput +from langflow.components.prompts import PromptComponent +from langflow.custom import Component +from langflow.graph.graph.base import Graph +from langflow.graph.state.model import create_state_model +from langflow.io import BoolInput, HandleInput, IntInput, MessageTextInput, MultilineInput, Output +from langflow.schema.message import Message + + +class LangflowAgent(Component): + display_name = "Langflow Agent" + description = "Customizable Agent component" + + inputs = [ + HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True), + HandleInput(name="tools", display_name="Tools", input_types=["Tool"], is_list=True, required=True), + IntInput(name="max_iterations", display_name="Max Iterations", value=5), + BoolInput(name="verbose", display_name="Verbose", value=False), + 
MultilineInput(name="system_prompt", display_name="System Prompt", value="You are a helpful assistant."), + MultilineInput(name="user_prompt", display_name="User Prompt", value="{input}"), + MultilineInput( + name="loop_prompt", + display_name="Loop Prompt", + value="Last Action Result: {last_action_result}\nBased on the actions taken, here's the final answer:", + ), + MessageTextInput( + name="decide_action_prompt", + display_name="Decide Action Prompt", + value="Based on your thought, decide the best action to take next.", + advanced=True, + ), + MessageTextInput( + name="final_answer_prompt", + display_name="Final Answer Prompt", + value="Considering all observations, provide the final answer to the user's query.", + advanced=True, + ), + ] + outputs = [Output(name="response", display_name="Response", method="get_response")] + + async def get_response(self) -> Message: + # Chat input initialization + chat_input = ChatInput().set(input_value=self.user_prompt) + + # Generate Thought + generate_thought = GenerateThoughtComponent().set( + prompt="Based on the provided context, generate your next thought.", + ) + + # Decide Action + decide_action = DecideActionComponent().set( + agent_context=generate_thought.generate_thought, + prompt=self.decide_action_prompt, + ) + + # Agent Action Router + action_router = AgentActionRouter().set( + agent_context=decide_action.decide_action, + max_iterations=self.max_iterations, + ) + + # Execute Action + execute_action = ExecuteActionComponent().set(agent_context=action_router.route_to_execute_tool) + # Loop Prompt + loop_prompt = PromptComponent().set( + template=self.loop_prompt, + answer=execute_action.execute_action, + ) + + generate_thought.set(prompt=loop_prompt.build_prompt) + + # Final Answer + final_answer = ProvideFinalAnswerComponent().set( + agent_context=action_router.route_to_final_answer, + prompt=self.final_answer_prompt, + ) + + # Chat output + chat_output = 
ChatOutput().set(input_value=final_answer.get_final_answer) + agent_output_model = create_state_model("AgentOutput", output=chat_output.message_response) + output_model = agent_output_model() + + # Build the graph + graph = Graph(chat_input, chat_output) + # Initialize the context + graph.context = { + "llm": self.llm, + "tools": self.tools, + "initial_message": chat_input.message_response, + "system_prompt": self.system_prompt, + "max_iterations": self.max_iterations, + "iteration": 0, + "thought": "", + "last_action": None, + "last_action_result": None, + "final_answer": "", + } + + async for result in graph.async_start(max_iterations=self.max_iterations): + if self.verbose: + logger.info(result) + + return output_model.output diff --git a/src/backend/base/langflow/components/agents/write_final_answer.py b/src/backend/base/langflow/components/agents/write_final_answer.py new file mode 100644 index 000000000000..d3eaf8b73962 --- /dev/null +++ b/src/backend/base/langflow/components/agents/write_final_answer.py @@ -0,0 +1,71 @@ +from typing import TYPE_CHECKING + +from langflow.custom import Component +from langflow.io import MessageTextInput, Output +from langflow.schema.data import Data +from langflow.schema.message import Message + +if TYPE_CHECKING: + from langchain_core.messages import AIMessage + + +class ProvideFinalAnswerComponent(Component): + display_name = "Provide Final Answer" + description = "Provides a final answer based on the context and actions taken." 
+ + inputs = [ + MessageTextInput( + name="prompt", + display_name="Prompt", + required=True, + value="Considering all observations, provide the final answer to the user's query.", + ), + ] + + outputs = [Output(name="final_answer", display_name="Final Answer", method="get_final_answer")] + + def _format_context(self) -> str: + ctx = self.ctx + context_parts = [] + + # Add thought if exists + if ctx.get("thought"): + context_parts.append(f"Last Thought: {ctx['thought']}") + + # Add last action and result if they exist + if ctx.get("last_action"): + context_parts.append(f"Last Action: {ctx['last_action']}") + if ctx.get("last_action_result"): + context_parts.append(f"Action Result: {ctx['last_action_result']}") + + # Add initial message for context + if ctx.get("initial_message"): + context_parts.append(f"\nInitial Query: {ctx['initial_message']}") + + return "\n".join(context_parts) + + def get_final_answer(self) -> Message: + # Format the full context + full_prompt = f"{self._format_context()}\n{self.prompt}\nFinal Answer:" + + # Generate final answer using LLM + response: AIMessage = self.ctx["llm"].invoke(full_prompt) + + # Update context with final answer + self.update_ctx({"final_answer": response.content}) + + # Create status data + self.status = [ + Data( + name="Final Answer", + value=f""" +Context Used: +{self._format_context()} + +Final Answer: +{response.content} +""", + ) + ] + + return Message(text=response.content)