diff --git a/README.md b/README.md
index b7673ff..aedc547 100644
--- a/README.md
+++ b/README.md
@@ -168,6 +168,7 @@ Interested in sponsoring this project? Feel free to reach out!
- [Job Search Agent](memory_agents/job_search_agent) - Job search agent with memory.
- [Brand Reputation Monitor](memory_agents/brand_reputation_monitor) - AI-powered brand reputation monitoring with news analysis and sentiment tracking.
- [Product Launch Agent](memory_agents/product_launch_agent) - Competitive intelligence tool for analyzing competitor product launches.
+- [AI Consultant Agent](memory_agents/ai_consultant_agent/) - An AI-powered consulting agent that uses **Memori v3** as a long-term memory fabric and **ExaAI** for research.
### π RAG Applications
diff --git a/memory_agents/ai_consultant_agent/.streamlit/config.toml b/memory_agents/ai_consultant_agent/.streamlit/config.toml
new file mode 100644
index 0000000..1c21c27
--- /dev/null
+++ b/memory_agents/ai_consultant_agent/.streamlit/config.toml
@@ -0,0 +1,10 @@
+[theme]
+base = "light"
+
+# Optional overrides for later:
+# primaryColor = "#0b57d0"
+# backgroundColor = "#ffffff"
+# secondaryBackgroundColor = "#f5f7fb"
+# textColor = "#000000"
+
+
diff --git a/memory_agents/ai_consultant_agent/README.md b/memory_agents/ai_consultant_agent/README.md
new file mode 100644
index 0000000..eb02858
--- /dev/null
+++ b/memory_agents/ai_consultant_agent/README.md
@@ -0,0 +1,136 @@
+## AI Consultant Agent with Memori
+
+An AI-powered consulting agent that uses **Memori v3** as a long-term memory fabric and **ExaAI** for research. Built with Streamlit for the UI.
+
+Memori repo: [`GibsonAI/Memori`](https://github.com/GibsonAI/Memori)
+
+## Features
+
+- 🧠 **AI Readiness Assessment**: Analyze a company's AI maturity, goals, and constraints.
+- 🎯 **Use-Case Recommendations**: Suggest where to integrate AI (workflows, CX, analytics, product, ecosystem).
+- 💵 **Cost Bands**: Provide rough cost bands and complexity for proposed AI initiatives.
+- ⚙️ **Web / Case-Study Research**: Use **ExaAI** to pull in relevant case studies and industry examples.
+- 🧾 **Persistent Memory (Memori v3)**: Store and reuse context across assessments and follow-up questions.
+
+## Prerequisites
+
+- Python 3.11 or higher
+- [uv](https://github.com/astral-sh/uv) package manager (fast Python package installer)
+- OpenAI API key (`OPENAI_API_KEY`)
+- ExaAI API key (`EXA_API_KEY`)
+- (Optional) `SQLITE_DB_PATH` if you want to override the default `./memori.sqlite` path
+
+## Installation
+
+### 1. Install `uv`
+
+If you don't have `uv` installed:
+
+```bash
+curl -LsSf https://astral.sh/uv/install.sh | sh
+```
+
+Or using pip:
+
+```bash
+pip install uv
+```
+
+### 2. Clone and Navigate
+
+From the root of the main repo:
+
+```bash
+cd memory_agents/ai_consultant_agent
+```
+
+### 3. Install Dependencies with `uv`
+
+Using `uv` (recommended):
+
+```bash
+uv sync
+```
+
+This will:
+
+- Create a virtual environment automatically.
+- Install all dependencies from `pyproject.toml`.
+- Make the project ready to run.
+
+### 4. Set Up Environment Variables
+
+Create a `.env` file in this directory:
+
+```bash
+OPENAI_API_KEY=your_openai_api_key_here
+EXA_API_KEY=your_exa_api_key_here
+# Optional:
+# SQLITE_DB_PATH=./memori.sqlite
+```
+
+## Usage
+
+### Run the Application
+
+Activate the virtual environment created by `uv` and run Streamlit:
+
+```bash
+# Activate the virtual environment (created by uv)
+source .venv/bin/activate # On macOS/Linux
+# or
+.venv\Scripts\activate # On Windows
+
+# Run the app
+streamlit run app.py
+```
+
+Or using `uv` directly:
+
+```bash
+uv run streamlit run app.py
+```
+
+The app will create (or use) a local **SQLite database** (default `./memori.sqlite`) for Memori v3.
+
+In the UI you can:
+
+1. **Enter API Keys** in the sidebar (or rely on `.env`).
+2. **Configure a Company Profile** in the **AI Assessment** tab.
+3. **Run an AI Assessment** to get:
+ - Recommendation (adopt AI now / later / not yet),
+ - Priority use cases,
+ - Cost bands & risks,
+ - Next-step plan.
+4. **Use the Memory Tab** to ask about:
+ - Previous recommendations,
+ - Previously suggested cost bands,
+ - How new ideas relate to earlier assessments.
+
+## Project Structure
+
+```text
+ai_consultant_agent/
+├── app.py                 # Streamlit interface (assessment + memory tabs)
+├── workflow.py            # ExaAI research + OpenAI-based consulting workflow
+├── pyproject.toml         # Project dependencies (uv format)
+├── README.md              # This file
+├── requirements.txt       # PIP-style dependency list
+├── .streamlit/
+│   └── config.toml        # Streamlit theme (light)
+├── assets/                # Logos (reused from other agents)
+└── memori.sqlite          # Memori database (created automatically)
+```
+
+## License
+
+See the main repository LICENSE file.
+
+## Contributing
+
+Contributions are welcome! Please feel free to submit a Pull Request.
+
+---
+
+Made with ❤️ by [Studio1](https://www.Studio1hq.com) Team
+
diff --git a/memory_agents/ai_consultant_agent/app.py b/memory_agents/ai_consultant_agent/app.py
new file mode 100644
index 0000000..03efedc
--- /dev/null
+++ b/memory_agents/ai_consultant_agent/app.py
@@ -0,0 +1,374 @@
+"""
+AI Consultant Agent with Memori
+Streamlit interface for AI readiness assessment + memory-powered follow-ups.
+"""
+
+import os
+import base64
+
+import streamlit as st
+from dotenv import load_dotenv
+from memori import Memori
+from openai import OpenAI
+from sqlalchemy import create_engine, text
+from sqlalchemy.orm import sessionmaker
+
+from workflow import CompanyProfile, run_ai_assessment
+
+# Load environment variables
+load_dotenv()
+
+# Page config
+st.set_page_config(
+ page_title="AI Consultant Agent",
+ layout="wide",
+)
+
+
+def _load_inline_image(path: str, height_px: int) -> str:
+ """Return an inline
tag for a local PNG, or empty string on failure."""
+ try:
+ with open(path, "rb") as f:
+ encoded = base64.b64encode(f.read()).decode()
+ return (
+ f"
"
+ )
+ except Exception:
+ return ""
+
+
+# Reuse existing logos from other agents
+memori_img_inline = _load_inline_image(
+ "../job_search_agent/assets/Memori_Logo.png", height_px=90
+)
+exa_img_inline = _load_inline_image(
+ "../job_search_agent/assets/exa_logo.png", height_px=70
+)
+
+title_html = f"""
+<div style="display:flex; align-items:center; gap:12px;">
+  <h1 style="display:flex; align-items:center; gap:12px; margin:0;">
+    AI Consultant Agent with
+    {memori_img_inline} and
+    {exa_img_inline}
+  </h1>
+</div>
+"""
+st.markdown(title_html, unsafe_allow_html=True)
+
+# Sidebar
+with st.sidebar:
+ st.subheader("π API Keys")
+
+ openai_api_key_input = st.text_input(
+ "OpenAI API Key",
+ value=os.getenv("OPENAI_API_KEY", ""),
+ type="password",
+ help="Your OpenAI API key for the consultant LLM (Memori v3 will register this client).",
+ )
+
+ exa_api_key_input = st.text_input(
+ "ExaAI API Key",
+ value=os.getenv("EXA_API_KEY", ""),
+ type="password",
+ help="Your ExaAI API key for web/case-study search",
+ )
+
+ if st.button("Save API Keys"):
+ if openai_api_key_input:
+ os.environ["OPENAI_API_KEY"] = openai_api_key_input
+ if exa_api_key_input:
+ os.environ["EXA_API_KEY"] = exa_api_key_input
+ if openai_api_key_input or exa_api_key_input:
+            st.success("✅ API keys saved for this session")
+ else:
+ st.warning("Please enter at least one API key")
+
+ both_keys_present = bool(os.getenv("EXA_API_KEY")) and bool(
+ os.getenv("OPENAI_API_KEY")
+ )
+ if both_keys_present:
+        st.caption("Both API keys detected ✅")
+ else:
+        st.caption("Missing API keys — some features may not work ⚠️")
+
+ st.markdown("---")
+    st.markdown("### 💡 About")
+ st.markdown(
+ """
+ This application acts as an **AI consultant** for companies:
+ - Assesses **AI readiness** and where to integrate AI.
+ - Suggests **use cases** across workforce, tools, and ecosystem.
+ - Provides rough **cost bands** and risks.
+        - Uses **Memori** to remember past assessments and Q&A.
+
+ Web research is powered by **ExaAI**, and reasoning is powered by **OpenAI** via Memori.
+
+ ---
+
+        Made with ❤️ by [Studio1](https://www.Studio1hq.com) Team
+ """
+ )
+
+# Get API keys from environment
+exa_key = os.getenv("EXA_API_KEY", "")
+
+# Initialize session state
+if "assessment_markdown" not in st.session_state:
+ st.session_state.assessment_markdown = None
+
+if "company_profile" not in st.session_state:
+ st.session_state.company_profile = None
+
+if "memory_messages" not in st.session_state:
+ st.session_state.memory_messages = []
+
+# Initialize Memori v3 + OpenAI client (once)
+if "openai_client" not in st.session_state:
+ openai_key = os.getenv("OPENAI_API_KEY", "")
+ if not openai_key:
+        st.warning("OPENAI_API_KEY is not set — Memori v3 will not be active.")
+ else:
+ try:
+ db_path = os.getenv("SQLITE_DB_PATH", "./memori.sqlite")
+ database_url = f"sqlite:///{db_path}"
+ engine = create_engine(
+ database_url,
+ pool_pre_ping=True,
+ connect_args={"check_same_thread": False},
+ )
+ # Optional DB connectivity check
+ with engine.connect() as conn:
+ conn.execute(text("SELECT 1"))
+
+ SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+
+ client = OpenAI(api_key=openai_key)
+ mem = Memori(conn=SessionLocal).openai.register(client)
+ # Basic attribution so Memori can attach memories
+ mem.attribution(entity_id="ai-consultant-user", process_id="ai-consultant")
+ mem.config.storage.build()
+
+ st.session_state.memori = mem
+ st.session_state.openai_client = client
+ except Exception as e:
+ st.warning(f"Memori v3 initialization note: {str(e)}")
+
+# Check if keys are set for required services
+if not exa_key:
+    st.warning("⚠️ Please enter your ExaAI API key in the sidebar to run assessments!")
+ st.stop()
+if "openai_client" not in st.session_state:
+    st.warning(
+        "⚠️ OPENAI_API_KEY missing or Memori v3 failed to initialize — "
+        "LLM responses will not work."
+    )
+ st.stop()
+
+# Tabs: Assessment + Memory
+tab1, tab2 = st.tabs(["📋 AI Assessment", "🧠 Memory"])
+
+with tab1:
+ st.markdown("#### Configure Company Profile & AI Assessment")
+
+ col1, col2 = st.columns([2, 1])
+ with col1:
+ company_name = st.text_input(
+ "Company Name *",
+ placeholder="e.g., Acme Corp",
+ help="The company you are assessing",
+ )
+ industry = st.text_input(
+ "Industry *",
+ placeholder="e.g., Retail, Fintech, Manufacturing",
+ help="Primary industry or sector",
+ )
+ region = st.text_input(
+ "Region / Market",
+ placeholder="e.g., US, EU, Global, APAC",
+ help="Where the company primarily operates",
+ )
+ with col2:
+ company_size = st.selectbox(
+ "Company Size *",
+ options=["1-50", "51-200", "201-1000", "1000+"],
+ help="Rough employee headcount band",
+ )
+ tech_maturity = st.selectbox(
+ "Tech & Data Maturity *",
+ options=["Low", "Medium", "High"],
+ help="How mature is their data/engineering stack?",
+ )
+
+ goals = st.multiselect(
+ "Business Goals for AI",
+ options=[
+ "Cost reduction",
+ "Revenue growth",
+ "Customer experience",
+ "Operational efficiency",
+ "Risk & compliance",
+ "Innovation / new products",
+ ],
+ help="What is leadership trying to achieve with AI?",
+ )
+
+ ai_focus_areas = st.multiselect(
+ "AI Focus Areas",
+ options=[
+ "Internal workflows & automation",
+ "Customer support / CX",
+ "Analytics & BI",
+ "Product features",
+ "Partner ecosystem / APIs",
+ ],
+ help="Where should we consider integrating AI?",
+ )
+
+ col3, col4 = st.columns(2)
+ with col3:
+ budget_range = st.selectbox(
+ "Rough Budget Range *",
+ options=["< $50k", "$50k-$250k", "$250k-$1M", ">$1M"],
+ )
+ with col4:
+ time_horizon = st.selectbox(
+ "Time Horizon for Initial Rollout *",
+ options=["0-3 months", "3-6 months", "6-12 months", "12+ months"],
+ )
+
+ notes = st.text_area(
+ "Additional Notes",
+ placeholder="Any constraints, existing systems, data sources, or regulatory considerations.",
+ height=120,
+ )
+
+    run_assessment = st.button("🚀 Run AI Assessment", type="primary")
+
+ if run_assessment:
+ if not company_name or not industry:
+ st.error("Please provide at least a company name and industry.")
+ else:
+ try:
+ profile = CompanyProfile(
+ company_name=company_name.strip(),
+ industry=industry.strip(),
+ company_size=company_size,
+ region=region.strip() if region else None,
+ tech_maturity=tech_maturity,
+ goals=goals,
+ ai_focus_areas=ai_focus_areas,
+ budget_range=budget_range,
+ time_horizon=time_horizon,
+ notes=notes.strip() if notes else None,
+ )
+ except Exception as e:
+ st.error(f"Invalid configuration: {e}")
+ else:
+                with st.spinner("🤖 Running AI assessment (research + reasoning)..."):
+ try:
+ assessment_markdown, _snippets = run_ai_assessment(
+ profile, st.session_state.openai_client
+ )
+ st.session_state.assessment_markdown = assessment_markdown
+ st.session_state.company_profile = profile
+
+ st.markdown(
+ f"## π§Ύ AI Readiness & Cost Assessment for **{profile.company_name}**"
+ )
+ st.markdown(assessment_markdown)
+
+ # With Memori v3, conversations are captured automatically
+ # via the registered OpenAI client, so no manual recording here.
+ except Exception as e:
+                        st.error(f"❌ Error during assessment: {e}")
+
+ # Show last assessment if available and we didn't just run a new one
+ if st.session_state.assessment_markdown and not run_assessment:
+ st.markdown(
+ "### Last Assessment Result "
+ + (
+ f"for **{st.session_state.company_profile.company_name}**"
+ if st.session_state.company_profile
+ else ""
+ )
+ )
+ st.markdown(st.session_state.assessment_markdown)
+
+with tab2:
+ st.markdown("#### Ask about past AI assessments")
+
+ if st.session_state.company_profile:
+ st.info(
+ f"Most recent company: **{st.session_state.company_profile.company_name}** "
+ f"({st.session_state.company_profile.industry})"
+ )
+ else:
+ st.info(
+ "Run at least one assessment in the **AI Assessment** tab to ground the memory context."
+ )
+
+ for message in st.session_state.memory_messages:
+ with st.chat_message(message["role"]):
+ st.markdown(message["content"])
+
+    memory_prompt = st.chat_input("Ask about past AI assessments (Memori-powered)…")
+
+ if memory_prompt:
+ st.session_state.memory_messages.append(
+ {"role": "user", "content": memory_prompt}
+ )
+ with st.chat_message("user"):
+ st.markdown(memory_prompt)
+
+ with st.chat_message("assistant"):
+            with st.spinner("🤖 Thinking…"):
+ try:
+ latest_context = ""
+ if (
+ st.session_state.assessment_markdown
+ and st.session_state.company_profile
+ ):
+ p = st.session_state.company_profile
+ latest_context = (
+ f"\n\nLatest assessment summary for {p.company_name} "
+ f"({p.industry}, {p.company_size}, {p.tech_maturity} tech maturity):\n"
+ f"{st.session_state.assessment_markdown[:1500]}\n"
+ )
+
+ full_prompt = f"""You are an AI consultant assistant with access to:
+1. Stored AI readiness assessments (captured automatically by Memori v3).
+2. The latest assessment in this session (if any).
+
+You can answer questions about:
+- What was previously recommended for a given company or industry.
+- Whether AI was suggested for specific areas (workforce, tools, ecosystem, etc.).
+- Cost bands, risks, and next steps that were advised before.
+- How new questions relate to past assessments.
+
+Use your memory of prior interactions (via Memori) plus the context below:
+{latest_context}
+
+Answer questions helpfully and concisely. If asked outside this scope, politely say you only answer about AI consulting and stored assessments."""
+
+ response = st.session_state.openai_client.chat.completions.create(
+ model="gpt-4o-mini",
+ messages=[
+ {"role": "system", "content": full_prompt},
+ {"role": "user", "content": memory_prompt},
+ ],
+ )
+ response_text = response.choices[0].message.content
+
+ st.session_state.memory_messages.append(
+ {"role": "assistant", "content": response_text}
+ )
+ st.markdown(response_text)
+ except Exception as e:
+                    err = f"❌ Error: {e}"
+ st.session_state.memory_messages.append(
+ {"role": "assistant", "content": err}
+ )
+ st.error(err)
diff --git a/memory_agents/ai_consultant_agent/pyproject.toml b/memory_agents/ai_consultant_agent/pyproject.toml
new file mode 100644
index 0000000..e8cdec9
--- /dev/null
+++ b/memory_agents/ai_consultant_agent/pyproject.toml
@@ -0,0 +1,20 @@
+[project]
+name = "ai-consultant-agent"
+version = "0.1.0"
+description = "AI Consultant Agent with LangChain, ExaAI, and Memori"
+readme = "README.md"
+requires-python = ">=3.11"
+dependencies = [
+ "langchain>=0.1.0",
+ "langchain-nebius>=0.1.3",
+ "langchain-core>=0.1.0",
+ "exa-py>=1.0.0",
+ "memori>=3.0.0b3",
+ "sqlalchemy>=2.0.0",
+ "openai>=2.6.1",
+ "streamlit>=1.50.0",
+ "pydantic>=2.0.0",
+ "python-dotenv>=1.0.0",
+]
+
+
diff --git a/memory_agents/ai_consultant_agent/workflow.py b/memory_agents/ai_consultant_agent/workflow.py
new file mode 100644
index 0000000..b993346
--- /dev/null
+++ b/memory_agents/ai_consultant_agent/workflow.py
@@ -0,0 +1,203 @@
+"""
+AI Consultant Workflow
+Uses OpenAI for reasoning and ExaAI for web/case-study research.
+"""
+
+import os
+from typing import List, Optional, Tuple, Any
+
+from dotenv import load_dotenv
+from exa_py import Exa
+from pydantic import BaseModel, Field
+
+# Load environment variables from .env if present
+load_dotenv()
+
+
+class CompanyProfile(BaseModel):
+ """Structured description of the company and its AI needs."""
+
+ company_name: str = Field(..., min_length=1)
+ industry: str = Field(..., min_length=1)
+ company_size: str = Field(
+ ...,
+ description="Rough size band, e.g. '1-50', '51-200', '201-1000', '1000+'.",
+ )
+ region: Optional[str] = Field(
+ default=None,
+ description="Geography or market (e.g. 'US', 'EU', 'Global', 'APAC').",
+ )
+ tech_maturity: str = Field(
+ ...,
+ description="Low / Medium / High description of data & engineering maturity.",
+ )
+ goals: List[str] = Field(
+ default_factory=list,
+ description="High-level business goals for AI (cost, revenue, CX, risk, innovation).",
+ )
+ ai_focus_areas: List[str] = Field(
+ default_factory=list,
+ description="Where to consider AI (workflows, support, analytics, product, ecosystem).",
+ )
+ budget_range: str = Field(
+ ...,
+ description="Rough budget band (e.g. '<$50k', '$50k-$250k', '$250k-$1M', '>$1M').",
+ )
+ time_horizon: str = Field(
+ ...,
+ description="Time horizon for initial AI rollout (e.g. '0-3 months', '3-6 months').",
+ )
+ notes: Optional[str] = Field(
+ default=None,
+ description="Free-text context: constraints, data sources, regulatory considerations, etc.",
+ )
+
+
+class ResearchSnippet(BaseModel):
+ """Single ExaAI search result distilled for prompting."""
+
+ title: str
+ url: str
+ snippet: str
+
+
+def _build_research_query(profile: CompanyProfile) -> str:
+ """Build a focused search query for AI adoption / case studies."""
+ parts: List[str] = [
+ profile.industry,
+ "AI adoption case studies",
+ "enterprise",
+ ]
+
+ if profile.company_size:
+ parts.append(f"company size {profile.company_size}")
+
+ if profile.region:
+ parts.append(profile.region)
+
+ if profile.goals:
+ parts.append(" ".join(profile.goals))
+
+ return " ".join(parts)
+
+
+def search_ai_case_studies_with_exa(
+ profile: CompanyProfile, max_results: int = 5
+) -> List[ResearchSnippet]:
+ """
+ Use ExaAI to retrieve a handful of relevant AI case studies / examples.
+ """
+ exa_key = os.getenv("EXA_API_KEY")
+ if not exa_key:
+ raise RuntimeError("EXA_API_KEY not set in environment variables")
+
+ client = Exa(api_key=exa_key)
+ query = _build_research_query(profile)
+
+ try:
+ results = client.search_and_contents(
+ query=query,
+ num_results=max_results,
+ type="auto",
+ text=True,
+ )
+ except Exception as e:
+ raise RuntimeError(f"Error calling ExaAI: {e}") from e
+
+ snippets: List[ResearchSnippet] = []
+ for r in results.results:
+ if not (r.title or r.url or r.text):
+ continue
+ text = (r.text or "").strip()
+ # Keep a concise but useful excerpt
+ snippet_text = text[:800] if text else ""
+ snippets.append(
+ ResearchSnippet(
+ title=r.title or "AI case study",
+ url=r.url or "",
+ snippet=snippet_text,
+ )
+ )
+
+ return snippets
+
+
+def run_ai_assessment(
+ profile: CompanyProfile, openai_client: Any
+) -> Tuple[str, List[ResearchSnippet]]:
+ """
+ Main workflow:
+ - Pull a few relevant case studies via ExaAI.
+    - Ask the LLM (OpenAI-compatible) to produce a structured consulting report.
+
+ Returns:
+ assessment_markdown: str -> Markdown report for display.
+ research_snippets: List[ResearchSnippet] -> For optional debugging / display.
+ """
+ # Step 1: web research
+ research_snippets = search_ai_case_studies_with_exa(profile, max_results=5)
+
+ # Step 2: build prompt for the consultant LLM
+ research_section = ""
+ if research_snippets:
+ lines: List[str] = []
+ for i, s in enumerate(research_snippets, start=1):
+ lines.append(f"{i}. {s.title} ({s.url})\n" f"{s.snippet}\n")
+ research_section = "\n".join(lines)
+ else:
+ research_section = (
+ "No external case studies were found. Rely on your general knowledge."
+ )
+
+ goals_str = ", ".join(profile.goals) if profile.goals else "Not specified"
+ areas_str = (
+ ", ".join(profile.ai_focus_areas) if profile.ai_focus_areas else "Not specified"
+ )
+
+ system_prompt = (
+ "You are a senior AI transformation consultant. "
+ "You give pragmatic, business-focused advice about whether and how a company "
+ "should adopt AI, including costs, risks, and change management."
+ )
+
+ user_prompt = (
+ f"Company profile:\n"
+ f"- Name: {profile.company_name}\n"
+ f"- Industry: {profile.industry}\n"
+ f"- Company size: {profile.company_size}\n"
+ f"- Region / market: {profile.region or 'Not specified'}\n"
+ f"- Tech maturity: {profile.tech_maturity}\n"
+ f"- Goals: {goals_str}\n"
+ f"- AI focus areas: {areas_str}\n"
+ f"- Budget range: {profile.budget_range}\n"
+ f"- Time horizon: {profile.time_horizon}\n"
+ f"- Additional notes: {profile.notes or 'None'}\n\n"
+ f"Relevant AI adoption / case-study research:\n"
+ f"{research_section}\n\n"
+ "Task:\n"
+ "1. Decide whether they should integrate AI now, later, or not at all. Be explicit.\n"
+ "2. Recommend specific AI use cases, grouped by area (workforce, internal tools, ecosystem, etc.).\n"
+ "3. Provide rough cost bands (e.g. '<$50k', '$50k-$250k', '$250k-$1M', '>$1M') and key cost drivers.\n"
+ "4. Call out major risks, dependencies, and change-management considerations.\n"
+ "5. Summarize concrete next steps the company should take in the next 30β90 days.\n\n"
+ "Respond in clear Markdown with the following sections and nothing else:\n"
+ "## Recommendation\n"
+ "## Priority AI Use Cases\n"
+ "## Cost & Complexity\n"
+ "## Risks & Considerations\n"
+ "## Next Steps\n"
+ )
+
+ try:
+ response = openai_client.chat.completions.create(
+ model="gpt-4o-mini",
+ messages=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_prompt},
+ ],
+ )
+ except Exception as e:
+ raise RuntimeError(f"Error calling consultant LLM: {e}") from e
+
+ assessment_markdown = response.choices[0].message.content
+ return assessment_markdown, research_snippets
diff --git a/memory_agents/job_search_agent/.env.example.local-backup b/memory_agents/job_search_agent/.env.example.local-backup
new file mode 100644
index 0000000..f0cc9b2
--- /dev/null
+++ b/memory_agents/job_search_agent/.env.example.local-backup
@@ -0,0 +1,2 @@
+EXA_API_KEY=your_exa_api_key_here
+NEBIUS_API_KEY=your_nebius_api_key_here
\ No newline at end of file