-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathLesson4-LangGraphAgentwithTools.py
More file actions
161 lines (132 loc) · 5.46 KB
/
Lesson4-LangGraphAgentwithTools.py
File metadata and controls
161 lines (132 loc) · 5.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
from typing import Literal
from langchain_core.tools import tool
from langgraph.checkpoint.memory import InMemorySaver
from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
from langgraph.graph import MessagesState, StateGraph, START, END
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from serpapi import GoogleSearch
from firecrawl import Firecrawl # Firecrawl SDK
# ----------------- ENV -----------------
# Load variables from a local .env file so credentials need not be exported
# in the shell before running the script.
load_dotenv()

groq_api_key = os.getenv("GROQ_API_KEY")
groq_base_url = os.getenv("GROQ_BASE_URL")
serp_api_key = os.getenv("SERP_API_KEY")
firecrawl_api_key = os.getenv("FIRECRAWL_API_KEY")

# Fail fast with a clear message if any required credential is missing or empty.
# (Checked in a single loop instead of four copy-pasted if/raise blocks; the
# messages and check order match the original behavior exactly.)
for _env_name, _env_value in [
    ("GROQ_API_KEY", groq_api_key),
    ("GROQ_BASE_URL", groq_base_url),
    ("SERP_API_KEY", serp_api_key),
    ("FIRECRAWL_API_KEY", firecrawl_api_key),
]:
    if not _env_value:
        raise ValueError(f"{_env_name} not found in .env file")
# ----------------- LLM -----------------
# Groq serves this Llama 4 model through an OpenAI-compatible endpoint, so the
# standard ChatOpenAI client works once base_url points at Groq.
llm = ChatOpenAI(
    model="meta-llama/llama-4-scout-17b-16e-instruct",
    api_key=groq_api_key,
    base_url=groq_base_url,
    temperature=0.5,  # moderate randomness; keeps research answers focused
)
# ----------------- Firecrawl -----------------
# Client used by the firecrawl_scrape tool below to summarize web pages.
firecrawl = Firecrawl(api_key=firecrawl_api_key)
# ----------------- TOOLS -----------------
@tool
def knowledge_graph_search(query: str) -> dict:
    """Extract specific information from Google's Knowledge Graph for reliable facts."""
    # Run the SerpAPI Google engine and pull out only the knowledge-graph panel.
    search = GoogleSearch({"engine": "google", "q": query, "api_key": serp_api_key})
    response = search.get_dict()
    panel = response.get("knowledge_graph", None)
    # An absent or empty panel means Google had no structured facts for the query.
    if not panel:
        return {"error": "No Knowledge Graph data found."}
    return {"knowledge_graph": panel}
@tool
def organic_google_search(query: str) -> dict:
    """Perform a general organic Google search. Returns top 6 results if found."""
    # Same SerpAPI engine as the knowledge-graph tool, but read the organic list.
    search = GoogleSearch({"engine": "google", "q": query, "api_key": serp_api_key})
    response = search.get_dict()
    hits = response.get("organic_results", None)
    # Empty or missing results → report an error so the LLM can adjust its plan.
    if not hits:
        return {"error": "No general search results found."}
    # Cap at six entries to keep the tool observation small.
    return {"results": hits[:6]}
@tool
def firecrawl_scrape(url: str) -> dict:
    """Scrape webpage content into a concise summary using Firecrawl."""
    try:
        # Ask Firecrawl for the "summary" format only — we never need raw HTML here.
        document = firecrawl.scrape(url, formats=["summary"])
        # getattr with a None default tolerates SDK responses missing either field.
        return {
            "url": url,
            "summary": getattr(document, "summary", None),
            "metadata": getattr(document, "metadata", None),
        }
    except Exception as exc:
        # Surface scrape failures as data so the agent loop keeps running.
        return {"error": str(exc)}
# Register tools
tools = [knowledge_graph_search, organic_google_search, firecrawl_scrape]
# Name -> tool lookup used by tool_node to dispatch the LLM's tool calls.
tools_by_name = {t.name: t for t in tools}
# ----------------- LLM WITH TOOLS -----------------
# Expose the tool schemas to the model so it can emit structured tool calls.
llm_with_tools = llm.bind_tools(tools)
# ----------------- GRAPH NODES -----------------
def llm_call(state: MessagesState):
    """Invoke the tool-enabled LLM on the conversation, prefixed with the research workflow prompt."""
    # The system prompt pins the tool-use order: knowledge graph first, then
    # organic search, then scrape the best URL.
    system_prompt = SystemMessage(content="""You are a research assistant.
Workflow:
1. Always attempt `knowledge_graph_search` first.
2. If knowledge graph content is available, use it directly.
3. If not available or an error occurs, perform `organic_google_search`.
4. From the search results, carefully select the **most relevant** URL (not just the first one).
5. Call `firecrawl_scrape` on the chosen URL to extract content.
Use this workflow strictly in order. Do not skip steps unless explicitly instructed.
""")
    conversation = [system_prompt, *state["messages"]]
    reply = llm_with_tools.invoke(conversation)
    # Returning a messages list lets LangGraph append the reply to state.
    return {"messages": [reply]}
def tool_node(state: "MessagesState"):
    """Execute every tool call requested by the most recent LLM message.

    The annotation is `MessagesState` (quoted to stay lazily evaluated) for
    consistency with the other graph nodes; the original said plain `dict`.

    Returns a dict with one ToolMessage per tool call, which LangGraph appends
    to the conversation state so the LLM sees each observation.
    """
    results = []
    for tool_call in state["messages"][-1].tool_calls:
        # Look up the registered tool by name and run it with the LLM-provided args.
        tool = tools_by_name[tool_call["name"]]
        observation = tool.invoke(tool_call["args"])
        # str(observation) keeps the message content serializable; the
        # tool_call_id ties the result back to the originating call.
        results.append(ToolMessage(content=str(observation), tool_call_id=tool_call["id"]))
    return {"messages": results}
def should_continue(state: MessagesState) -> Literal["Action", END]:
    """Route to the tool node ("Action") while the last message requests tools, else finish."""
    # getattr guards against message types that carry no tool_calls attribute.
    pending_calls = getattr(state["messages"][-1], "tool_calls", None)
    return "Action" if pending_calls else END
# ----------------- GRAPH -----------------
agent_builder = StateGraph(MessagesState)
agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("environment", tool_node)
agent_builder.add_edge(START, "llm_call")
# After each LLM turn, either run the requested tools ("Action") or finish (END).
agent_builder.add_conditional_edges("llm_call", should_continue, {"Action": "environment", END: END})
# Tool observations always flow back to the LLM for the next decision.
agent_builder.add_edge("environment", "llm_call")
# Attach memory
# In-memory checkpointer: state persists across invocations within this
# process, keyed by the thread_id passed at invoke time.
memory = InMemorySaver()
agent = agent_builder.compile(checkpointer=memory)
# Choose a thread id for this interactive run
THREAD_ID = "chat-session"
# ----------------- INTERACTIVE LOOP -----------------
def chat_loop():
    """Read user queries from stdin and print the agent's final answer until the user quits."""
    while True:
        query = input("\nPhil: ")
        # Any of the usual exit words ends the session.
        if query.lower() in ("quit", "exit", "q"):
            print("Goodbye!")
            break
        # 🔑 Pass the thread_id so the checkpointer resumes the same conversation.
        outcome = agent.invoke(
            {"messages": [HumanMessage(content=query)]},
            config={"configurable": {"thread_id": THREAD_ID}},
        )
        # ✅ Show only the final AI message, not intermediate tool traffic.
        print("\nAI:", outcome["messages"][-1].content, "\n")
if __name__ == "__main__":
    # Entry point: start the interactive research-agent REPL.
    print("🤖 Type your research query (or 'quit' to exit).")
    chat_loop()