diff --git a/memory_agents/ai_consultant_agent/README.md b/memory_agents/ai_consultant_agent/README.md
index f269eff..2f8d2c7 100644
--- a/memory_agents/ai_consultant_agent/README.md
+++ b/memory_agents/ai_consultant_agent/README.md
@@ -2,14 +2,14 @@
## AI Consultant Agent with Memori
-An AI-powered consulting agent that uses **Memori v3** as a long-term memory fabric and **ExaAI** for research. Built with Streamlit for the UI.
+An AI-powered consulting agent that uses **Memori v3** as a long-term memory fabric and **Tavily** for research. Built with Streamlit for the UI.
## Features
- π§ **AI Readiness Assessment**: Analyze a company's AI maturity, goals, and constraints.
- π― **Use-Case Recommendations**: Suggest where to integrate AI (workflows, CX, analytics, product, ecosystem).
- π΅ **Cost Bands**: Provide rough cost bands and complexity for proposed AI initiatives.
-- βοΈ **Web / Case-Study Research**: Use **ExaAI** to pull in relevant case studies and industry examples.
+- βοΈ **Web / Case-Study Research**: Use **Tavily** to pull in relevant case studies and industry examples.
- π§Ύ **Persistent Memory (Memori v3)**: Store and reuse context across assessments and follow-up questions.
## Prerequisites
@@ -17,7 +17,7 @@ An AI-powered consulting agent that uses **Memori v3** as a long-term memory fab
- Python 3.11 or higher
- [uv](https://github.com/astral-sh/uv) package manager (fast Python package installer)
- OpenAI API key (`OPENAI_API_KEY`)
-- ExaAI API key (`EXA_API_KEY`)
+- Tavily API key (`TAVILY_API_KEY`)
- Memori API key (`MEMORI_API_KEY`)
- (Optional) `SQLITE_DB_PATH` if you want to override the default `./memori.sqlite` path
@@ -65,7 +65,7 @@ Create a `.env` file in this directory:
```bash
OPENAI_API_KEY=your_openai_api_key_here
-EXA_API_KEY=your_exa_api_key_here
+TAVILY_API_KEY=your_tavily_api_key_here
# Optional:
# SQLITE_DB_PATH=./memori.sqlite
```
@@ -113,7 +113,7 @@ In the UI you can:
```text
ai_consultant_agent/
βββ app.py # Streamlit interface (assessment + memory tabs)
-βββ workflow.py # ExaAI research + OpenAI-based consulting workflow
+βββ workflow.py # Tavily research + OpenAI-based consulting workflow
βββ pyproject.toml # Project dependencies (uv format)
βββ README.md # This file
βββ requirements.txt # PIP-style dependency list
diff --git a/memory_agents/ai_consultant_agent/app.py b/memory_agents/ai_consultant_agent/app.py
index 59b5ecf..8eea19c 100644
--- a/memory_agents/ai_consultant_agent/app.py
+++ b/memory_agents/ai_consultant_agent/app.py
@@ -40,19 +40,15 @@ def _load_inline_image(path: str, height_px: int) -> str:
# Reuse existing logos from other agents
-memori_img_inline = _load_inline_image(
- "../job_search_agent/assets/Memori_Logo.png", height_px=90
-)
-exa_img_inline = _load_inline_image(
- "../job_search_agent/assets/exa_logo.png", height_px=70
-)
+memori_img_inline = _load_inline_image("assets/Memori_Logo.png", height_px=90)
+tavily_img_inline = _load_inline_image("assets/tavily_logo.png", height_px=70)
title_html = f"""
AI Consultant Agent with
{memori_img_inline}and
- {exa_img_inline}
+ {tavily_img_inline}
"""
@@ -76,11 +72,11 @@ def _load_inline_image(path: str, height_px: int) -> str:
help="Used for Memori Advanced Augmentation and higher quotas.",
)
- exa_api_key_input = st.text_input(
- "ExaAI API Key",
- value=os.getenv("EXA_API_KEY", ""),
+ tavily_api_key_input = st.text_input(
+ "Tavily API Key",
+ value=os.getenv("TAVILY_API_KEY", ""),
type="password",
- help="Your ExaAI API key for web/case-study search",
+ help="Your Tavily API key for web/case-study search",
)
if st.button("Save API Keys"):
@@ -88,14 +84,14 @@ def _load_inline_image(path: str, height_px: int) -> str:
os.environ["OPENAI_API_KEY"] = openai_api_key_input
if memori_api_key_input:
os.environ["MEMORI_API_KEY"] = memori_api_key_input
- if exa_api_key_input:
- os.environ["EXA_API_KEY"] = exa_api_key_input
- if openai_api_key_input or exa_api_key_input or memori_api_key_input:
+ if tavily_api_key_input:
+ os.environ["TAVILY_API_KEY"] = tavily_api_key_input
+ if openai_api_key_input or tavily_api_key_input or memori_api_key_input:
st.success("✅ API keys saved for this session")
else:
st.warning("Please enter at least one API key")
- both_keys_present = bool(os.getenv("EXA_API_KEY")) and bool(
+ both_keys_present = bool(os.getenv("TAVILY_API_KEY")) and bool(
os.getenv("OPENAI_API_KEY")
)
if both_keys_present:
@@ -107,13 +103,13 @@ def _load_inline_image(path: str, height_px: int) -> str:
st.markdown("### π‘ About")
st.markdown(
"""
- This application acts as an **AI consultant** for companies:
- - Assesses **AI readiness** and where to integrate AI.
- - Suggests **use cases** across workforce, tools, and ecosystem.
- - Provides rough **cost bands** and risks.
- - Uses **Memori** + to remember past assessments and Q&A.
+ This application acts as an **AI consultant** for companies:
+ - Assesses **AI readiness** and where to integrate AI.
+ - Suggests **use cases** across workforce, tools, and ecosystem.
+ - Provides rough **cost bands** and risks.
+ - Uses **Memori** + to remember past assessments and Q&A.
- Web research is powered by **ExaAI**, and reasoning is powered by **OpenAI** via Memori.
+ Web research is powered by **Tavily**, and reasoning is powered by **OpenAI** via Memori.
---
@@ -122,7 +118,7 @@ def _load_inline_image(path: str, height_px: int) -> str:
)
# Get API keys from environment
-exa_key = os.getenv("EXA_API_KEY", "")
+tavily_key = os.getenv("TAVILY_API_KEY", "")
# Initialize session state
if "assessment_markdown" not in st.session_state:
@@ -166,8 +162,8 @@ def _load_inline_image(path: str, height_px: int) -> str:
st.warning(f"Memori v3 initialization note: {str(e)}")
# Check if keys are set for required services
-if not exa_key:
- st.warning("β οΈ Please enter your ExaAI API key in the sidebar to run assessments!")
+if not tavily_key:
+    st.warning("⚠️ Please enter your Tavily API key in the sidebar to run assessments!")
st.stop()
if "openai_client" not in st.session_state:
st.warning(
@@ -285,7 +281,7 @@ def _load_inline_image(path: str, height_px: int) -> str:
st.session_state.company_profile = profile
st.markdown(
- f"## π§Ύ AI Readiness & Cost Assessment for **{profile.company_name}**"
+        f"## π§Ύ AI Readiness & Cost Assessment for **{profile.company_name}**"
)
st.markdown(assessment_markdown)
@@ -299,7 +295,7 @@ def _load_inline_image(path: str, height_px: int) -> str:
st.markdown(
"### Last Assessment Result "
+ (
- f"for **{st.session_state.company_profile.company_name}**"
+            f"for **{st.session_state.company_profile.company_name}**"
if st.session_state.company_profile
else ""
)
@@ -311,12 +307,12 @@ def _load_inline_image(path: str, height_px: int) -> str:
if st.session_state.company_profile:
st.info(
- f"Most recent company: **{st.session_state.company_profile.company_name}** "
+        f"Most recent company: **{st.session_state.company_profile.company_name}** "
f"({st.session_state.company_profile.industry})"
)
else:
st.info(
- "Run at least one assessment in the **AI Assessment** tab to ground the memory context."
+        "Run at least one assessment in the **AI Assessment** tab to ground the memory context."
)
for message in st.session_state.memory_messages:
diff --git a/memory_agents/ai_consultant_agent/assets/Memori_Logo.png b/memory_agents/ai_consultant_agent/assets/Memori_Logo.png
new file mode 100644
index 0000000..4b4416b
Binary files /dev/null and b/memory_agents/ai_consultant_agent/assets/Memori_Logo.png differ
diff --git a/memory_agents/ai_consultant_agent/assets/tavily_logo.png b/memory_agents/ai_consultant_agent/assets/tavily_logo.png
new file mode 100644
index 0000000..5e35234
Binary files /dev/null and b/memory_agents/ai_consultant_agent/assets/tavily_logo.png differ
diff --git a/memory_agents/ai_consultant_agent/workflow.py b/memory_agents/ai_consultant_agent/workflow.py
index b993346..f2f11b1 100644
--- a/memory_agents/ai_consultant_agent/workflow.py
+++ b/memory_agents/ai_consultant_agent/workflow.py
@@ -1,13 +1,13 @@
"""
AI Consultant Workflow
-Uses LangChain for reasoning and ExaAI for web/case-study research.
+Uses LangChain for reasoning and Tavily for web/case-study research.
"""
import os
from typing import List, Optional, Tuple, Any
from dotenv import load_dotenv
-from exa_py import Exa
+from tavily import TavilyClient
from pydantic import BaseModel, Field
# Load environment variables from .env if present
@@ -54,7 +54,7 @@ class CompanyProfile(BaseModel):
class ResearchSnippet(BaseModel):
- """Single ExaAI search result distilled for prompting."""
+ """Single Tavily search result distilled for prompting."""
title: str
url: str
@@ -81,40 +81,43 @@ def _build_research_query(profile: CompanyProfile) -> str:
return " ".join(parts)
-def search_ai_case_studies_with_exa(
+def search_ai_case_studies_with_tavily(
profile: CompanyProfile, max_results: int = 5
) -> List[ResearchSnippet]:
"""
- Use ExaAI to retrieve a handful of relevant AI case studies / examples.
+ Use Tavily to retrieve a handful of relevant AI case studies / examples.
"""
- exa_key = os.getenv("EXA_API_KEY")
- if not exa_key:
- raise RuntimeError("EXA_API_KEY not set in environment variables")
+ tavily_key = os.getenv("TAVILY_API_KEY")
+ if not tavily_key:
+ raise RuntimeError("TAVILY_API_KEY not set in environment variables")
- client = Exa(api_key=exa_key)
+ client = TavilyClient(api_key=tavily_key)
query = _build_research_query(profile)
try:
- results = client.search_and_contents(
+ # Use advanced depth to get richer snippets; return up to max_results
+ results = client.search(
query=query,
- num_results=max_results,
- type="auto",
- text=True,
+ search_depth="advanced",
+ max_results=max_results,
+ include_answer=False,
+ include_raw_content=False,
)
except Exception as e:
- raise RuntimeError(f"Error calling ExaAI: {e}") from e
+ raise RuntimeError(f"Error calling Tavily: {e}") from e
snippets: List[ResearchSnippet] = []
- for r in results.results:
- if not (r.title or r.url or r.text):
- continue
- text = (r.text or "").strip()
- # Keep a concise but useful excerpt
+ for r in results.get("results", []):
+ title = (r.get("title") or "AI case study").strip()
+ url = r.get("url") or ""
+ text = (r.get("content") or r.get("snippet") or "").strip()
snippet_text = text[:800] if text else ""
+ if not (title or url or snippet_text):
+ continue
snippets.append(
ResearchSnippet(
- title=r.title or "AI case study",
- url=r.url or "",
+ title=title,
+ url=url,
snippet=snippet_text,
)
)
@@ -127,7 +130,7 @@ def run_ai_assessment(
) -> Tuple[str, List[ResearchSnippet]]:
"""
Main workflow:
- - Pull a few relevant case studies via ExaAI.
+ - Pull a few relevant case studies via Tavily.
- Ask the LLM (LangChain-compatible) to produce a structured consulting report.
Returns:
@@ -135,7 +138,7 @@ def run_ai_assessment(
research_snippets: List[ResearchSnippet] -> For optional debugging / display.
"""
# Step 1: web research
- research_snippets = search_ai_case_studies_with_exa(profile, max_results=5)
+ research_snippets = search_ai_case_studies_with_tavily(profile, max_results=5)
# Step 2: build prompt for the consultant LLM
research_section = ""