diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 000000000..f5f168660
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,22 @@
+name: CI
+
+on: [push, pull_request]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.11'
+      - name: Install Poetry
+        run: pip install poetry
+      - name: Install deps
+        run: poetry install --no-interaction --no-root
+      - name: Run equity tests
+        run: poetry run pytest -q
+      - name: Run crypto tests
+        env:
+          ASSET_CLASS: CRYPTO
+        run: poetry run pytest -q
diff --git a/Dockerfile b/Dockerfile
index e97dd7a7c..5605ad246 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,7 +13,8 @@ COPY pyproject.toml poetry.lock* /app/
# Configure Poetry to not use a virtual environment
RUN poetry config virtualenvs.create false \
- && poetry install --no-interaction --no-ansi
+ && poetry install --no-interaction --no-ansi --no-root \
+ && pip install ccxt pycoingecko
# Copy rest of the source code
COPY . /app/
diff --git a/README.md b/README.md
index eb0d1ff64..09fb613ec 100644
--- a/README.md
+++ b/README.md
@@ -78,6 +78,23 @@ poetry install
cp .env.example .env
```
+## 🚀 Crypto Quick-Start
+
+Enable crypto trading by setting the `ASSET_CLASS` environment variable:
+
+```bash
+ASSET_CLASS=CRYPTO python src/main.py --pair BTC/USDT --exchange binance
+```
+
+Set `ALLOW_MARGIN=1` to enable margin trading.
+
+### Environment Variables
+
+| Variable | Description | Default |
+|---|---|---|
+| `ASSET_CLASS` | `EQUITY` or `CRYPTO` | `EQUITY` |
+| `ALLOW_MARGIN` | Enable shorting/margin when set to `1` | `0` |
+
4. Set your API keys:
```bash
# For running LLMs hosted by openai (gpt-4o, gpt-4o-mini, etc.)
diff --git a/app/backend/models/schemas.py b/app/backend/models/schemas.py
index 3613f6054..af4011fb7 100644
--- a/app/backend/models/schemas.py
+++ b/app/backend/models/schemas.py
@@ -1,6 +1,8 @@
from datetime import datetime, timedelta
-from pydantic import BaseModel, Field
from typing import List, Optional
+
+from pydantic import BaseModel, Field
+
from src.llm.models import ModelProvider
@@ -15,7 +17,9 @@ class ErrorResponse(BaseModel):
class HedgeFundRequest(BaseModel):
- tickers: List[str]
+ pairs: List[str] = Field(..., example=["BTC/USDT"])
+ exchange: str = Field("binance", example="binance")
+ tickers: Optional[List[str]] = Field(None, deprecated=True)
selected_agents: List[str]
end_date: Optional[str] = Field(default_factory=lambda: datetime.now().strftime("%Y-%m-%d"))
start_date: Optional[str] = None
diff --git a/app/backend/routes/hedge_fund.py b/app/backend/routes/hedge_fund.py
index df7b6f1e2..f315cfc0c 100644
--- a/app/backend/routes/hedge_fund.py
+++ b/app/backend/routes/hedge_fund.py
@@ -1,10 +1,17 @@
+import asyncio
+
from fastapi import APIRouter, HTTPException
from fastapi.responses import StreamingResponse
-import asyncio
+from app.backend.models.events import (
+ CompleteEvent,
+ ErrorEvent,
+ ProgressUpdateEvent,
+ StartEvent,
+)
from app.backend.models.schemas import ErrorResponse, HedgeFundRequest
-from app.backend.models.events import StartEvent, ProgressUpdateEvent, ErrorEvent, CompleteEvent
-from app.backend.services.graph import create_graph, parse_hedge_fund_response, run_graph_async
+from app.backend.services.graph import create_graph, run_graph_async
+from src.utils.parsing import parse_hedge_fund_response
from app.backend.services.portfolio import create_portfolio
from src.utils.progress import progress
@@ -22,7 +29,8 @@
async def run_hedge_fund(request: HedgeFundRequest):
try:
# Create the portfolio
- portfolio = create_portfolio(request.initial_cash, request.margin_requirement, request.tickers)
+ symbols = request.pairs or request.tickers
+ portfolio = create_portfolio(request.initial_cash, request.margin_requirement, symbols)
# Construct agent graph
graph = create_graph(request.selected_agents)
@@ -55,11 +63,12 @@ def progress_handler(agent_name, ticker, status, analysis, timestamp):
run_graph_async(
graph=graph,
portfolio=portfolio,
- tickers=request.tickers,
+ tickers=symbols,
start_date=request.start_date,
end_date=request.end_date,
model_name=request.model_name,
model_provider=model_provider,
+ exchange=request.exchange,
)
)
# Send initial message
diff --git a/app/backend/services/graph.py b/app/backend/services/graph.py
index 13920d76b..1828e8a40 100644
--- a/app/backend/services/graph.py
+++ b/app/backend/services/graph.py
@@ -1,13 +1,16 @@
import asyncio
import json
+
+from src.utils.parsing import parse_hedge_fund_response
+
from langchain_core.messages import HumanMessage
from langgraph.graph import END, StateGraph
from src.agents.portfolio_manager import portfolio_management_agent
from src.agents.risk_manager import risk_management_agent
+from src.graph.state import AgentState
from src.main import start
from src.utils.analysts import ANALYST_CONFIG
-from src.graph.state import AgentState
# Helper function to create the agent graph
@@ -48,12 +51,15 @@ def create_graph(selected_agents: list[str]) -> StateGraph:
return graph
-async def run_graph_async(graph, portfolio, tickers, start_date, end_date, model_name, model_provider):
+async def run_graph_async(graph, portfolio, tickers, start_date, end_date, model_name, model_provider, exchange=""):
"""Async wrapper for run_graph to work with asyncio."""
# Use run_in_executor to run the synchronous function in a separate thread
# so it doesn't block the event loop
loop = asyncio.get_running_loop()
- result = await loop.run_in_executor(None, lambda: run_graph(graph, portfolio, tickers, start_date, end_date, model_name, model_provider)) # Use default executor
+ result = await loop.run_in_executor(
+ None,
+ lambda: run_graph(graph, portfolio, tickers, start_date, end_date, model_name, model_provider, exchange),
+ )
return result
@@ -65,6 +71,7 @@ def run_graph(
end_date: str,
model_name: str,
model_provider: str,
+ exchange: str = "",
) -> dict:
"""
Run the graph with the given portfolio, tickers,
@@ -84,6 +91,7 @@ def run_graph(
"start_date": start_date,
"end_date": end_date,
"analyst_signals": {},
+ "exchange": exchange,
},
"metadata": {
"show_reasoning": False,
@@ -92,18 +100,3 @@ def run_graph(
},
},
)
-
-
-def parse_hedge_fund_response(response):
- """Parses a JSON string and returns a dictionary."""
- try:
- return json.loads(response)
- except json.JSONDecodeError as e:
- print(f"JSON decoding error: {e}\nResponse: {repr(response)}")
- return None
- except TypeError as e:
- print(f"Invalid response type (expected string, got {type(response).__name__}): {e}")
- return None
- except Exception as e:
- print(f"Unexpected error while parsing response: {e}\nResponse: {repr(response)}")
- return None
diff --git a/app/backend/services/portfolio.py b/app/backend/services/portfolio.py
index 3b700a284..66e4c1a3e 100644
--- a/app/backend/services/portfolio.py
+++ b/app/backend/services/portfolio.py
@@ -1,7 +1,6 @@
-
def create_portfolio(initial_cash: float, margin_requirement: float, tickers: list[str]) -> dict:
- return {
+ return {
"cash": initial_cash, # Initial cash amount
"margin_requirement": margin_requirement, # Initial margin requirement
"margin_used": 0.0, # total margin usage across all short positions
@@ -22,4 +21,4 @@ def create_portfolio(initial_cash: float, margin_requirement: float, tickers: li
}
for ticker in tickers
},
- }
\ No newline at end of file
+ }
diff --git a/app/frontend/index.html b/app/frontend/index.html
index c841cec73..370e82f21 100644
--- a/app/frontend/index.html
+++ b/app/frontend/index.html
@@ -1,5 +1,5 @@
-
+
diff --git a/app/frontend/src/components/PairSelector.tsx b/app/frontend/src/components/PairSelector.tsx
new file mode 100644
index 000000000..80cae773f
--- /dev/null
+++ b/app/frontend/src/components/PairSelector.tsx
@@ -0,0 +1,35 @@
+import { useEffect, useState } from 'react';
+
+interface PairSelectorProps {
+  value: string;
+  onChange: (pair: string) => void;
+}
+
+export function PairSelector({ value, onChange }: PairSelectorProps) {
+  const [pairs, setPairs] = useState<string[]>([]);
+
+  useEffect(() => {
+    async function fetchPairs() {
+      try {
+        const res = await fetch(
+          'https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd&order=market_cap_desc&per_page=100&page=1'
+        );
+        const data = await res.json();
+        setPairs(data.map((c: any) => `${c.symbol.toUpperCase()}/USDT`));
+      } catch (e) {
+        console.error('Failed to fetch coin list', e);
+      }
+    }
+    fetchPairs();
+  }, []);
+
+  return (
+    <select value={value} onChange={(e) => onChange(e.target.value)}>
+      {pairs.map((pair) => (
+        <option key={pair} value={pair}>
+          {pair}
+        </option>
+      ))}
+    </select>
+  );
+}
diff --git a/app/frontend/src/nodes/components/agent-output-dialog.tsx b/app/frontend/src/nodes/components/agent-output-dialog.tsx
index ca5f72da0..700d390a9 100644
--- a/app/frontend/src/nodes/components/agent-output-dialog.tsx
+++ b/app/frontend/src/nodes/components/agent-output-dialog.tsx
@@ -138,10 +138,10 @@ export function AgentOutputDialog({
Analysis
- {/* Ticker selector */}
+ {/* Pair selector */}
{tickersWithDecisions.length > 0 && (
- Ticker:
+ Pair:
diff --git a/app/frontend/src/nodes/components/text-output-dialog.tsx b/app/frontend/src/nodes/components/text-output-dialog.tsx
index e630c5f7a..6711bd1c8 100644
--- a/app/frontend/src/nodes/components/text-output-dialog.tsx
+++ b/app/frontend/src/nodes/components/text-output-dialog.tsx
@@ -108,7 +108,7 @@ export function TextOutputDialog({
- Ticker
+ Pair
Price
Action
Quantity
diff --git a/docker-compose.yml b/docker-compose.yml
index 64032670e..7a641c7df 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -88,5 +88,21 @@ services:
tty: true
stdin_open: true
+  hedge-fund-crypto:
+    build: .
+    image: ai-hedge-fund
+    depends_on:
+      - ollama
+    volumes:
+      - ./.env:/app/.env
+    command: python src/main.py --pair BTC/USDT --exchange binance
+    environment:
+      - PYTHONUNBUFFERED=1
+      - OLLAMA_BASE_URL=http://ollama:11434
+      - PYTHONPATH=/app
+      - ASSET_CLASS=CRYPTO
+    tty: true
+    stdin_open: true
+
volumes:
ollama_data:
\ No newline at end of file
diff --git a/docs/adr/0002-enable-crypto.md b/docs/adr/0002-enable-crypto.md
new file mode 100644
index 000000000..aa1afae79
--- /dev/null
+++ b/docs/adr/0002-enable-crypto.md
@@ -0,0 +1,15 @@
+# 0002 - Enable Crypto Asset Class
+
+## Context
+
+The platform originally supported only equity trading using daily OHLCV data. Expanding to spot crypto pairs requires new data sources, risk constraints, and agent logic.
+
+## Decision
+
+Introduce a feature flag `ASSET_CLASS` defaulting to `EQUITY`. When set to `CRYPTO`, the system routes price data through CCXT, adds crypto specific analysts, and adjusts risk management.
+
+## Consequences
+
+- Maintains backward compatibility with equity workflows.
+- Adds dependencies on `ccxt` and `pycoingecko`.
+- CI runs test suites for both asset classes.
diff --git a/docs/huong_dan_vi.md b/docs/huong_dan_vi.md
new file mode 100644
index 000000000..537bf2672
--- /dev/null
+++ b/docs/huong_dan_vi.md
@@ -0,0 +1,86 @@
+# Hướng Dẫn Sử Dụng AI Hedge Fund
+
+Tài liệu này giải thích cách cài đặt và chạy dự án **AI Hedge Fund** bằng tiếng Việt. Mục tiêu của tài liệu là giúp người mới không rành code vẫn có thể sử dụng hệ thống.
+
+## 1. Giới Thiệu Nhanh
+
+AI Hedge Fund là một dự án minh hoạ việc dùng trí tuệ nhân tạo để đưa ra quyết định giao dịch. Phần mềm chỉ dành cho **mục đích học tập và nghiên cứu**, không phải để đầu tư thật. Tác giả không chịu trách nhiệm về bất kỳ rủi ro tài chính nào.
+
+## 2. Chuẩn Bị Môi Trường
+
+- Máy tính cần cài sẵn **Python 3**, **Git** và **Poetry**.
+- Nếu muốn chạy giao diện web (frontend), bạn cần cài thêm **Node.js**.
+- Với người dùng Windows, dùng file `run.bat`.
+- Với người dùng Mac/Linux, dùng file `run.sh`.
+
+## 3. Tải Mã Nguồn
+
+Mở cửa sổ dòng lệnh và chạy:
+
+```bash
+git clone https://github.com/virattt/ai-hedge-fund.git
+cd ai-hedge-fund
+```
+
+## 4. Tạo File Cấu Hình `.env`
+
+Sao chép file mẫu và chỉnh sửa để thêm các khoá API của bạn:
+
+```bash
+cp .env.example .env
+```
+
+Mở file `.env` và điền các khoá như `OPENAI_API_KEY`, `GROQ_API_KEY`, `FINANCIAL_DATASETS_API_KEY` (nếu có). Đây là các khoá dùng để gọi mô hình AI và lấy dữ liệu thị trường.
+
+## 5. Cách Chạy Nhanh
+
+### Trên Mac/Linux
+
+```bash
+./run.sh
+```
+
+Nếu gặp lỗi không có quyền chạy, hãy chạy lệnh sau rồi thử lại:
+
+```bash
+chmod +x run.sh && ./run.sh
+```
+
+### Trên Windows
+
+Mở Command Prompt và chạy:
+
+```cmd
+run.bat
+```
+
+Script sẽ tự động cài các phụ thuộc cần thiết và khởi động dịch vụ. Khi chạy xong, bạn có thể mở trình duyệt tới địa chỉ `http://localhost:5173` để xem giao diện (nếu sử dụng phiên bản có frontend) hoặc theo dõi kết quả ở màn hình dòng lệnh.
+
+## 6. Tuỳ Chọn Giao Dịch Crypto
+
+Nếu muốn bật chế độ giao dịch tiền mã hoá, đặt biến môi trường `ASSET_CLASS=CRYPTO` trước khi chạy. Ví dụ:
+
+```bash
+ASSET_CLASS=CRYPTO ./run.sh --pair BTC/USDT --exchange binance
+```
+
+## 7. Chạy Thủ Công Bằng Poetry (Tuỳ Chọn)
+
+Người dùng có kinh nghiệm hơn có thể chạy trực tiếp bằng Poetry:
+
+```bash
+poetry install
+poetry run python src/main.py --ticker AAPL,MSFT,NVDA
+```
+
+Có thể thêm các tuỳ chọn `--start-date`, `--end-date`, `--show-reasoning` hoặc `--ollama` để dùng mô hình LLM cục bộ.
+
+## 8. Lưu Ý Quan Trọng
+
+- Phần mềm này chỉ phục vụ **mục đích học tập**.
+- Không nên dùng để giao dịch với tiền thật.
+- Kết quả trong quá khứ không đảm bảo cho tương lai.
+- Tác giả không chịu trách nhiệm cho bất kỳ khoản lỗ nào.
+
+Chúc bạn thành công trong quá trình khám phá dự án!
+
diff --git a/poetry.lock b/poetry.lock
index 151478058..47a08f65a 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,19 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
+
+[[package]]
+name = "aiodns"
+version = "3.4.0"
+description = "Simple DNS resolver for asyncio"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+ {file = "aiodns-3.4.0-py3-none-any.whl", hash = "sha256:4da2b25f7475343f3afbb363a2bfe46afa544f2b318acb9a945065e622f4ed24"},
+ {file = "aiodns-3.4.0.tar.gz", hash = "sha256:24b0ae58410530367f21234d0c848e4de52c1f16fbddc111726a4ab536ec1b2f"},
+]
+
+[package.dependencies]
+pycares = ">=4.0.0"
[[package]]
name = "aiohappyeyeballs"
@@ -6,6 +21,7 @@ version = "2.6.1"
description = "Happy Eyeballs for asyncio"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8"},
{file = "aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558"},
@@ -17,6 +33,7 @@ version = "3.11.18"
description = "Async http client/server framework (asyncio)"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "aiohttp-3.11.18-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:96264854fedbea933a9ca4b7e0c745728f01380691687b7365d18d9e977179c4"},
{file = "aiohttp-3.11.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9602044ff047043430452bc3a2089743fa85da829e6fc9ee0025351d66c332b6"},
@@ -111,7 +128,7 @@ propcache = ">=0.2.0"
yarl = ">=1.17.0,<2.0"
[package.extras]
-speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"]
+speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
[[package]]
name = "aiosignal"
@@ -119,6 +136,7 @@ version = "1.3.2"
description = "aiosignal: a list of registered asynchronous callbacks"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"},
{file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"},
@@ -133,6 +151,7 @@ version = "1.15.2"
description = "A database migration tool for SQLAlchemy."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "alembic-1.15.2-py3-none-any.whl", hash = "sha256:2e76bd916d547f6900ec4bb5a90aeac1485d2c92536923d0b138c02b126edc53"},
{file = "alembic-1.15.2.tar.gz", hash = "sha256:1c72391bbdeffccfe317eefba686cb9a3c078005478885413b95c3b26c57a8a7"},
@@ -152,6 +171,7 @@ version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
@@ -163,6 +183,7 @@ version = "0.50.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "anthropic-0.50.0-py3-none-any.whl", hash = "sha256:defbd79327ca2fa61fd7b9eb2f1627dfb1f69c25d49288c52e167ddb84574f80"},
{file = "anthropic-0.50.0.tar.gz", hash = "sha256:42175ec04ce4ff2fa37cd436710206aadff546ee99d70d974699f59b49adc66f"},
@@ -187,6 +208,7 @@ version = "3.7.1"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"},
{file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"},
@@ -198,7 +220,7 @@ sniffio = ">=1.1"
[package.extras]
doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"]
-test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4) ; python_version < \"3.8\"", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; python_version < \"3.12\" and platform_python_implementation == \"CPython\" and platform_system != \"Windows\""]
trio = ["trio (<0.22)"]
[[package]]
@@ -207,18 +229,19 @@ version = "25.3.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
{file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
]
[package.extras]
-benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"]
-tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
[[package]]
name = "black"
@@ -226,6 +249,7 @@ version = "23.12.1"
description = "The uncompromising code formatter."
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2"},
{file = "black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba"},
@@ -260,7 +284,7 @@ platformdirs = ">=2"
[package.extras]
colorama = ["colorama (>=0.4.3)"]
-d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
+d = ["aiohttp (>=3.7.4) ; sys_platform != \"win32\" or implementation_name != \"pypy\"", "aiohttp (>=3.7.4,!=3.9.0) ; sys_platform == \"win32\" and implementation_name == \"pypy\""]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
uvloop = ["uvloop (>=0.15.2)"]
@@ -270,28 +294,137 @@ version = "5.5.2"
description = "Extensible memoizing collections and decorators"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a"},
{file = "cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4"},
]
+[[package]]
+name = "ccxt"
+version = "4.4.87"
+description = "A JavaScript / TypeScript / Python / C# / PHP cryptocurrency trading library with support for 100+ exchanges"
+optional = false
+python-versions = "*"
+groups = ["main"]
+files = [
+ {file = "ccxt-4.4.87-py2.py3-none-any.whl", hash = "sha256:709e950078bb83f7ad02d5ddf898b00693ad8a39b4e0f06b1c4e3b886934871e"},
+ {file = "ccxt-4.4.87.tar.gz", hash = "sha256:1b511ccdc55a1096f4550712883f46ca6e022caf9e78758f36ad81a43f1cfbf2"},
+]
+
+[package.dependencies]
+aiodns = {version = ">=1.1.1", markers = "python_version >= \"3.5.2\""}
+aiohttp = {version = ">=3.10.11", markers = "python_version >= \"3.5.2\""}
+certifi = ">=2018.1.18"
+cryptography = ">=2.6.1"
+requests = ">=2.18.4"
+setuptools = ">=60.9.0"
+typing-extensions = ">=4.4.0"
+yarl = {version = ">=1.7.2", markers = "python_version >= \"3.5.2\""}
+
+[package.extras]
+qa = ["ruff (==0.0.292)", "tox (>=4.8.0)"]
+type = ["mypy (==1.6.1)"]
+
[[package]]
name = "certifi"
version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
+groups = ["main"]
files = [
{file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
{file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
]
+[[package]]
+name = "cffi"
+version = "1.17.1"
+description = "Foreign Function Interface for Python calling C code."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
+ {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
+ {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
+ {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
+ {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
+ {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
+ {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
+ {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
+ {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
+ {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
+ {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
+ {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
+ {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
+ {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
+ {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
+ {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
+ {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
+ {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
+ {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
+ {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
+ {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
+ {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
+ {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
+ {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
+ {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
+ {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
+ {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
+ {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
+ {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
+ {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
+ {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
+ {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
+ {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
+ {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
+ {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
+ {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
+ {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
+ {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
+ {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
+ {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
+ {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
+ {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
+ {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
+ {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
+ {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
+ {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
+ {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
+ {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
+ {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
+ {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
+ {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
+ {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
+ {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
+ {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
+ {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
+ {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
+ {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
+ {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
+ {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
+ {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
+ {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
+ {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
+ {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
+ {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
+ {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
+ {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
+ {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
+]
+
+[package.dependencies]
+pycparser = "*"
+
[[package]]
name = "charset-normalizer"
version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
@@ -393,6 +526,7 @@ version = "8.1.8"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
{file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
@@ -407,6 +541,7 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["main", "dev"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
@@ -418,6 +553,7 @@ version = "1.3.2"
description = "Python library for calculating contours of 2D quadrilateral grids"
optional = false
python-versions = ">=3.10"
+groups = ["main"]
files = [
{file = "contourpy-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba38e3f9f330af820c4b27ceb4b9c7feee5fe0493ea53a8720f4792667465934"},
{file = "contourpy-1.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc41ba0714aa2968d1f8674ec97504a8f7e334f48eeacebcaa6256213acb0989"},
@@ -488,12 +624,73 @@ mypy = ["bokeh", "contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.15.0)", "
test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"]
+[[package]]
+name = "cryptography"
+version = "45.0.3"
+description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
+optional = false
+python-versions = "!=3.9.0,!=3.9.1,>=3.7"
+groups = ["main"]
+files = [
+ {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"},
+ {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"},
+ {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fae1e637f527750811588e4582988932c222f8251f7b7ea93739acb624e1487f"},
+ {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ca932e11218bcc9ef812aa497cdf669484870ecbcf2d99b765d6c27a86000942"},
+ {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af3f92b1dc25621f5fad065288a44ac790c5798e986a34d393ab27d2b27fcff9"},
+ {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2f8f8f0b73b885ddd7f3d8c2b2234a7d3ba49002b0223f58cfde1bedd9563c56"},
+ {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9cc80ce69032ffa528b5e16d217fa4d8d4bb7d6ba8659c1b4d74a1b0f4235fca"},
+ {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c824c9281cb628015bfc3c59335163d4ca0540d49de4582d6c2637312907e4b1"},
+ {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5833bb4355cb377ebd880457663a972cd044e7f49585aee39245c0d592904578"},
+ {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bb5bf55dcb69f7067d80354d0a348368da907345a2c448b0babc4215ccd3497"},
+ {file = "cryptography-45.0.3-cp311-abi3-win32.whl", hash = "sha256:3ad69eeb92a9de9421e1f6685e85a10fbcfb75c833b42cc9bc2ba9fb00da4710"},
+ {file = "cryptography-45.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:97787952246a77d77934d41b62fb1b6f3581d83f71b44796a4158d93b8f5c490"},
+ {file = "cryptography-45.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:c92519d242703b675ccefd0f0562eb45e74d438e001f8ab52d628e885751fb06"},
+ {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5edcb90da1843df85292ef3a313513766a78fbbb83f584a5a58fb001a5a9d57"},
+ {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38deed72285c7ed699864f964a3f4cf11ab3fb38e8d39cfcd96710cd2b5bb716"},
+ {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5555365a50efe1f486eed6ac7062c33b97ccef409f5970a0b6f205a7cfab59c8"},
+ {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9e4253ed8f5948a3589b3caee7ad9a5bf218ffd16869c516535325fece163dcc"},
+ {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cfd84777b4b6684955ce86156cfb5e08d75e80dc2585e10d69e47f014f0a5342"},
+ {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:a2b56de3417fd5f48773ad8e91abaa700b678dc7fe1e0c757e1ae340779acf7b"},
+ {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:57a6500d459e8035e813bd8b51b671977fb149a8c95ed814989da682314d0782"},
+ {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f22af3c78abfbc7cbcdf2c55d23c3e022e1a462ee2481011d518c7fb9c9f3d65"},
+ {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:232954730c362638544758a8160c4ee1b832dc011d2c41a306ad8f7cccc5bb0b"},
+ {file = "cryptography-45.0.3-cp37-abi3-win32.whl", hash = "sha256:cb6ab89421bc90e0422aca911c69044c2912fc3debb19bb3c1bfe28ee3dff6ab"},
+ {file = "cryptography-45.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:d54ae41e6bd70ea23707843021c778f151ca258081586f0cfa31d936ae43d1b2"},
+ {file = "cryptography-45.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed43d396f42028c1f47b5fec012e9e12631266e3825e95c00e3cf94d472dac49"},
+ {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:fed5aaca1750e46db870874c9c273cd5182a9e9deb16f06f7bdffdb5c2bde4b9"},
+ {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:00094838ecc7c6594171e8c8a9166124c1197b074cfca23645cee573910d76bc"},
+ {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:92d5f428c1a0439b2040435a1d6bc1b26ebf0af88b093c3628913dd464d13fa1"},
+ {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:ec64ee375b5aaa354b2b273c921144a660a511f9df8785e6d1c942967106438e"},
+ {file = "cryptography-45.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:71320fbefd05454ef2d457c481ba9a5b0e540f3753354fff6f780927c25d19b0"},
+ {file = "cryptography-45.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:edd6d51869beb7f0d472e902ef231a9b7689508e83880ea16ca3311a00bf5ce7"},
+ {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:555e5e2d3a53b4fabeca32835878b2818b3f23966a4efb0d566689777c5a12c8"},
+ {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:25286aacb947286620a31f78f2ed1a32cded7be5d8b729ba3fb2c988457639e4"},
+ {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:050ce5209d5072472971e6efbfc8ec5a8f9a841de5a4db0ebd9c2e392cb81972"},
+ {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dc10ec1e9f21f33420cc05214989544727e776286c1c16697178978327b95c9c"},
+ {file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"},
+ {file = "cryptography-45.0.3.tar.gz", hash = "sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"},
+]
+
+[package.dependencies]
+cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""}
+
+[package.extras]
+docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""]
+docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
+nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""]
+pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"]
+sdist = ["build (>=1.0.0)"]
+ssh = ["bcrypt (>=3.1.5)"]
+test = ["certifi (>=2024)", "cryptography-vectors (==45.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
+test-randomorder = ["pytest-randomly"]
+
[[package]]
name = "cycler"
version = "0.12.1"
description = "Composable style cycles"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"},
{file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"},
@@ -509,6 +706,7 @@ version = "0.7.1"
description = "XML bomb protection for Python stdlib modules"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+groups = ["main"]
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -520,6 +718,7 @@ version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
+groups = ["main"]
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
@@ -531,6 +730,7 @@ version = "0.104.1"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "fastapi-0.104.1-py3-none-any.whl", hash = "sha256:752dc31160cdbd0436bb93bad51560b57e525cbb1d4bbf6f4904ceee75548241"},
{file = "fastapi-0.104.1.tar.gz", hash = "sha256:e5e4540a7c5e1dcfbbcf5b903c234feddcdcd881f191977a1c5dfd917487e7ae"},
@@ -551,6 +751,7 @@ version = "0.0.7"
description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 🚀"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "fastapi_cli-0.0.7-py3-none-any.whl", hash = "sha256:d549368ff584b2804336c61f192d86ddea080c11255f375959627911944804f4"},
{file = "fastapi_cli-0.0.7.tar.gz", hash = "sha256:02b3b65956f526412515907a0793c9094abd4bfb5457b389f645b0ea6ba3605e"},
@@ -570,6 +771,7 @@ version = "1.2.0"
description = "Infer file type and MIME type of any file/buffer. No external dependencies."
optional = false
python-versions = "*"
+groups = ["main"]
files = [
{file = "filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25"},
{file = "filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb"},
@@ -581,6 +783,7 @@ version = "6.1.0"
description = "the modular source code checker: pep8 pyflakes and co"
optional = false
python-versions = ">=3.8.1"
+groups = ["dev"]
files = [
{file = "flake8-6.1.0-py2.py3-none-any.whl", hash = "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"},
{file = "flake8-6.1.0.tar.gz", hash = "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23"},
@@ -597,6 +800,7 @@ version = "4.57.0"
description = "Tools to manipulate font files"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "fonttools-4.57.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:babe8d1eb059a53e560e7bf29f8e8f4accc8b6cfb9b5fd10e485bde77e71ef41"},
{file = "fonttools-4.57.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:81aa97669cd726349eb7bd43ca540cf418b279ee3caba5e2e295fb4e8f841c02"},
@@ -651,18 +855,18 @@ files = [
]
[package.extras]
-all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"]
+all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0) ; python_version <= \"3.12\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"]
graphite = ["lz4 (>=1.7.4.2)"]
-interpolatable = ["munkres", "pycairo", "scipy"]
+interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""]
lxml = ["lxml (>=4.0)"]
pathops = ["skia-pathops (>=0.5.0)"]
plot = ["matplotlib"]
repacker = ["uharfbuzz (>=0.23.0)"]
symfont = ["sympy"]
-type1 = ["xattr"]
+type1 = ["xattr ; sys_platform == \"darwin\""]
ufo = ["fs (>=2.2.0,<3)"]
-unicode = ["unicodedata2 (>=15.1.0)"]
-woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
+unicode = ["unicodedata2 (>=15.1.0) ; python_version <= \"3.12\""]
+woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"]
[[package]]
name = "frozenlist"
@@ -670,6 +874,7 @@ version = "1.6.0"
description = "A list-like structure which implements collections.abc.MutableSequence"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "frozenlist-1.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e6e558ea1e47fd6fa8ac9ccdad403e5dd5ecc6ed8dda94343056fa4277d5c65e"},
{file = "frozenlist-1.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f4b3cd7334a4bbc0c472164f3744562cb72d05002cc6fcf58adb104630bbc352"},
@@ -783,6 +988,7 @@ version = "0.6.18"
description = "Google Ai Generativelanguage API client library"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "google_ai_generativelanguage-0.6.18-py3-none-any.whl", hash = "sha256:13d8174fea90b633f520789d32df7b422058fd5883b022989c349f1017db7fcf"},
{file = "google_ai_generativelanguage-0.6.18.tar.gz", hash = "sha256:274ba9fcf69466ff64e971d565884434388e523300afd468fc8e3033cd8e606e"},
@@ -792,7 +998,7 @@ files = [
google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0", extras = ["grpc"]}
google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0"
proto-plus = [
- {version = ">=1.22.3,<2.0.0", markers = "python_version < \"3.13\""},
+ {version = ">=1.22.3,<2.0.0"},
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
]
protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
@@ -803,6 +1009,7 @@ version = "2.24.2"
description = "Google API client core library"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "google_api_core-2.24.2-py3-none-any.whl", hash = "sha256:810a63ac95f3c441b7c0e43d344e372887f62ce9071ba972eacf32672e072de9"},
{file = "google_api_core-2.24.2.tar.gz", hash = "sha256:81718493daf06d96d6bc76a91c23874dbf2fac0adbbf542831b805ee6e974696"},
@@ -814,7 +1021,7 @@ googleapis-common-protos = ">=1.56.2,<2.0.0"
grpcio = {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}
grpcio-status = {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}
proto-plus = [
- {version = ">=1.22.3,<2.0.0", markers = "python_version < \"3.13\""},
+ {version = ">=1.22.3,<2.0.0"},
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
]
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
@@ -822,7 +1029,7 @@ requests = ">=2.18.0,<3.0.0"
[package.extras]
async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.dev0)"]
-grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"]
+grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev) ; python_version >= \"3.11\"", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0) ; python_version >= \"3.11\""]
grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
@@ -832,6 +1039,7 @@ version = "2.39.0"
description = "Google Authentication Library"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "google_auth-2.39.0-py2.py3-none-any.whl", hash = "sha256:0150b6711e97fb9f52fe599f55648950cc4540015565d8fbb31be2ad6e1548a2"},
{file = "google_auth-2.39.0.tar.gz", hash = "sha256:73222d43cdc35a3aeacbfdcaf73142a97839f10de930550d89ebfe1d0a00cde7"},
@@ -845,11 +1053,11 @@ rsa = ">=3.1.4,<5"
[package.extras]
aiohttp = ["aiohttp (>=3.6.2,<4.0.0)", "requests (>=2.20.0,<3.0.0)"]
enterprise-cert = ["cryptography", "pyopenssl"]
-pyjwt = ["cryptography (<39.0.0)", "cryptography (>=38.0.3)", "pyjwt (>=2.0)"]
-pyopenssl = ["cryptography (<39.0.0)", "cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"]
+pyjwt = ["cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "pyjwt (>=2.0)"]
+pyopenssl = ["cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"]
reauth = ["pyu2f (>=0.1.5)"]
requests = ["requests (>=2.20.0,<3.0.0)"]
-testing = ["aiohttp (<3.10.0)", "aiohttp (>=3.6.2,<4.0.0)", "aioresponses", "cryptography (<39.0.0)", "cryptography (>=38.0.3)", "flask", "freezegun", "grpcio", "mock", "oauth2client", "packaging", "pyjwt (>=2.0)", "pyopenssl (<24.3.0)", "pyopenssl (>=20.0.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-localserver", "pyu2f (>=0.1.5)", "requests (>=2.20.0,<3.0.0)", "responses", "urllib3"]
+testing = ["aiohttp (<3.10.0)", "aiohttp (>=3.6.2,<4.0.0)", "aioresponses", "cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "flask", "freezegun", "grpcio", "mock", "oauth2client", "packaging", "pyjwt (>=2.0)", "pyopenssl (<24.3.0)", "pyopenssl (>=20.0.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-localserver", "pyu2f (>=0.1.5)", "requests (>=2.20.0,<3.0.0)", "responses", "urllib3"]
urllib3 = ["packaging", "urllib3"]
[[package]]
@@ -858,6 +1066,7 @@ version = "1.70.0"
description = "Common protobufs used in Google APIs"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"},
{file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"},
@@ -875,6 +1084,8 @@ version = "3.2.1"
description = "Lightweight in-process concurrent programming"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
+markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"
files = [
{file = "greenlet-3.2.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:777c1281aa7c786738683e302db0f55eb4b0077c20f1dc53db8852ffaea0a6b0"},
{file = "greenlet-3.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3059c6f286b53ea4711745146ffe5a5c5ff801f62f6c56949446e0f6461f8157"},
@@ -943,6 +1154,7 @@ version = "0.23.1"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
{file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
@@ -962,6 +1174,7 @@ version = "1.71.0"
description = "HTTP/2-based RPC framework"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "grpcio-1.71.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:c200cb6f2393468142eb50ab19613229dcc7829b5ccee8b658a36005f6669fdd"},
{file = "grpcio-1.71.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b2266862c5ad664a380fbbcdbdb8289d71464c42a8c29053820ee78ba0119e5d"},
@@ -1025,6 +1238,7 @@ version = "1.62.3"
description = "Status proto mapping for gRPC"
optional = false
python-versions = ">=3.6"
+groups = ["main"]
files = [
{file = "grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"},
{file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"},
@@ -1041,6 +1255,7 @@ version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
@@ -1052,6 +1267,7 @@ version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
{file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
@@ -1073,6 +1289,7 @@ version = "0.6.4"
description = "A collection of framework independent HTTP protocol utils."
optional = false
python-versions = ">=3.8.0"
+groups = ["main"]
files = [
{file = "httptools-0.6.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c73ce323711a6ffb0d247dcd5a550b8babf0f757e86a52558fe5b86d6fefcc0"},
{file = "httptools-0.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345c288418f0944a6fe67be8e6afa9262b18c7626c3ef3c28adc5eabc06a68da"},
@@ -1128,6 +1345,7 @@ version = "0.27.2"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
{file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
@@ -1141,7 +1359,7 @@ idna = "*"
sniffio = "*"
[package.extras]
-brotli = ["brotli", "brotlicffi"]
+brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
@@ -1153,6 +1371,7 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
+groups = ["main"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -1167,6 +1386,7 @@ version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
@@ -1178,6 +1398,7 @@ version = "5.13.2"
description = "A Python utility / library to sort Python imports."
optional = false
python-versions = ">=3.8.0"
+groups = ["dev"]
files = [
{file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"},
{file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"},
@@ -1192,6 +1413,7 @@ version = "0.9.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"},
{file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"},
@@ -1277,6 +1499,7 @@ version = "1.33"
description = "Apply JSON-Patches (RFC 6902)"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
+groups = ["main"]
files = [
{file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"},
{file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"},
@@ -1291,6 +1514,7 @@ version = "3.0.0"
description = "Identify specific nodes in a JSON document (RFC 6901)"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"},
{file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"},
@@ -1302,6 +1526,7 @@ version = "1.4.8"
description = "A fast implementation of the Cassowary constraint solver"
optional = false
python-versions = ">=3.10"
+groups = ["main"]
files = [
{file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db"},
{file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b"},
@@ -1391,6 +1616,7 @@ version = "0.3.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain-0.3.0-py3-none-any.whl", hash = "sha256:59a75a6a1eb7bfd2a6bf0c7a5816409a8fdc9046187b07af287b23b9899617af"},
{file = "langchain-0.3.0.tar.gz", hash = "sha256:a7c23892440bd1f5b9e029ff0dd709dd881ae927c4c0a3210ac64dba9bbf3f7f"},
@@ -1417,6 +1643,7 @@ version = "0.3.5"
description = "An integration package connecting AnthropicMessages and LangChain"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain_anthropic-0.3.5-py3-none-any.whl", hash = "sha256:bad34b02d7b4bdca9a9471bc391b01269fd8dc4600b83ca2a3e76925b7c27fe6"},
{file = "langchain_anthropic-0.3.5.tar.gz", hash = "sha256:2aa1673511056061680492871f386d68a8b62947e0eb1f15303ef10db16c8357"},
@@ -1434,6 +1661,7 @@ version = "0.3.56"
description = "Building applications with LLMs through composability"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain_core-0.3.56-py3-none-any.whl", hash = "sha256:a20c6aca0fa0da265d96d3b14a5a01828ac5d2d9d27516434873d76f2d4839ed"},
{file = "langchain_core-0.3.56.tar.gz", hash = "sha256:de896585bc56e12652327dcd195227c3739a07e86e587c91a07101e0df11dffe"},
@@ -1457,6 +1685,7 @@ version = "0.1.3"
description = "An integration package connecting DeepSeek and LangChain"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain_deepseek-0.1.3-py3-none-any.whl", hash = "sha256:8588e826371b417fca65c02f4273b4061eb9815a7bfcd5eb05acaa40d603aa89"},
{file = "langchain_deepseek-0.1.3.tar.gz", hash = "sha256:89dd6aa120fb50dcfcd3d593626d34c1c40deefe4510710d0807fcc19481adf5"},
@@ -1472,6 +1701,7 @@ version = "2.1.3"
description = "An integration package connecting Google's genai package and LangChain"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain_google_genai-2.1.3-py3-none-any.whl", hash = "sha256:adf222931ac7af543f4013751a9b7dbd9ed637fb4eb3e4e0cd7e1d5d7e066d36"},
{file = "langchain_google_genai-2.1.3.tar.gz", hash = "sha256:0d4e2abf01a7594a9420d3569cf2cd4239a01cc24c6698d3c2c92a072b9b7b4a"},
@@ -1489,6 +1719,7 @@ version = "0.2.3"
description = "An integration package connecting Groq and LangChain"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain_groq-0.2.3-py3-none-any.whl", hash = "sha256:3572c812acc1478ab0670c48eb9a135c95f47631190da750e48408267462a12d"},
{file = "langchain_groq-0.2.3.tar.gz", hash = "sha256:f94810fe734c9402b36273ddc3509eaa67f12a7d06b666c6ca472ab0bfdf37b7"},
@@ -1504,6 +1735,7 @@ version = "0.2.3"
description = "An integration package connecting Ollama and LangChain"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain_ollama-0.2.3-py3-none-any.whl", hash = "sha256:c47700ca68b013358b1e954493ecafb3bd10fa2cda71a9f15ba7897587a9aab2"},
{file = "langchain_ollama-0.2.3.tar.gz", hash = "sha256:d13fe8735176b652ca6e6656d7902c1265e8c0601097569f7c95433f3d034b38"},
@@ -1519,6 +1751,7 @@ version = "0.3.14"
description = "An integration package connecting OpenAI and LangChain"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain_openai-0.3.14-py3-none-any.whl", hash = "sha256:b8e648d2d7678a5540818199d141ff727c6f1514294b3e1e999a95357c9d66a0"},
{file = "langchain_openai-0.3.14.tar.gz", hash = "sha256:0662db78620c2e5c3ccfc1c36dc959c0ddc80e6bdf7ef81632cbf4b2cc9b9461"},
@@ -1535,6 +1768,7 @@ version = "0.3.8"
description = "LangChain text splitting utilities"
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["main"]
files = [
{file = "langchain_text_splitters-0.3.8-py3-none-any.whl", hash = "sha256:e75cc0f4ae58dcf07d9f18776400cf8ade27fadd4ff6d264df6278bb302f6f02"},
{file = "langchain_text_splitters-0.3.8.tar.gz", hash = "sha256:116d4b9f2a22dda357d0b79e30acf005c5518177971c66a9f1ab0edfdb0f912e"},
@@ -1549,6 +1783,7 @@ version = "0.2.56"
description = "Building stateful, multi-actor applications with LLMs"
optional = false
python-versions = "<4.0,>=3.9.0"
+groups = ["main"]
files = [
{file = "langgraph-0.2.56-py3-none-any.whl", hash = "sha256:ad8a4b772e34dc0137e890bb6ced596a39a1e684af66250c1e7c8150dbe90e9c"},
{file = "langgraph-0.2.56.tar.gz", hash = "sha256:af10b1ffd10d52fd4072a73f154b8c2513c0b22e5bd5d20f4567dfeecab98d1e"},
@@ -1565,6 +1800,7 @@ version = "2.0.25"
description = "Library with base interfaces for LangGraph checkpoint savers."
optional = false
python-versions = "<4.0.0,>=3.9.0"
+groups = ["main"]
files = [
{file = "langgraph_checkpoint-2.0.25-py3-none-any.whl", hash = "sha256:23416a0f5bc9dd712ac10918fc13e8c9c4530c419d2985a441df71a38fc81602"},
{file = "langgraph_checkpoint-2.0.25.tar.gz", hash = "sha256:77a63cab7b5f84dec1d49db561326ec28bdd48bcefb7fe4ac372069d2609287b"},
@@ -1580,6 +1816,7 @@ version = "0.1.63"
description = "SDK for interacting with LangGraph API"
optional = false
python-versions = "<4.0.0,>=3.9.0"
+groups = ["main"]
files = [
{file = "langgraph_sdk-0.1.63-py3-none-any.whl", hash = "sha256:6fb78a7fc6a30eea43bd0d6401dbc9e3263d0d4c03f63c04035980da7e586b05"},
{file = "langgraph_sdk-0.1.63.tar.gz", hash = "sha256:62bf2cc31e5aa6c5b9011ee1702bcf1e36e67e142a60bd97af2611162fb58e18"},
@@ -1595,6 +1832,7 @@ version = "0.1.147"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
+groups = ["main"]
files = [
{file = "langsmith-0.1.147-py3-none-any.whl", hash = "sha256:7166fc23b965ccf839d64945a78e9f1157757add228b086141eb03a60d699a15"},
{file = "langsmith-0.1.147.tar.gz", hash = "sha256:2e933220318a4e73034657103b3b1a3a6109cc5db3566a7e8e03be8d6d7def7a"},
@@ -1619,6 +1857,7 @@ version = "1.3.10"
description = "A super-fast templating language that borrows the best ideas from the existing templating languages."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59"},
{file = "mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28"},
@@ -1638,6 +1877,7 @@ version = "3.0.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
{file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
@@ -1662,6 +1902,7 @@ version = "3.0.2"
description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"},
{file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"},
@@ -1732,6 +1973,7 @@ version = "3.10.1"
description = "Python plotting package"
optional = false
python-versions = ">=3.10"
+groups = ["main"]
files = [
{file = "matplotlib-3.10.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ff2ae14910be903f4a24afdbb6d7d3a6c44da210fc7d42790b87aeac92238a16"},
{file = "matplotlib-3.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0721a3fd3d5756ed593220a8b86808a36c5031fce489adb5b31ee6dbb47dd5b2"},
@@ -1789,6 +2031,7 @@ version = "0.7.0"
description = "McCabe checker, plugin for flake8"
optional = false
python-versions = ">=3.6"
+groups = ["dev"]
files = [
{file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
@@ -1800,6 +2043,7 @@ version = "0.1.2"
description = "Markdown URL utilities"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
{file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
@@ -1811,6 +2055,7 @@ version = "6.4.3"
description = "multidict implementation"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "multidict-6.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:32a998bd8a64ca48616eac5a8c1cc4fa38fb244a3facf2eeb14abe186e0f6cc5"},
{file = "multidict-6.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a54ec568f1fc7f3c313c2f3b16e5db346bf3660e1309746e7fccbbfded856188"},
@@ -1924,6 +2169,7 @@ version = "1.1.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"},
{file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"},
@@ -1935,6 +2181,7 @@ version = "1.26.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
{file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
@@ -1980,6 +2227,7 @@ version = "0.4.8"
description = "The official Python client for Ollama."
optional = false
python-versions = "<4.0,>=3.8"
+groups = ["main"]
files = [
{file = "ollama-0.4.8-py3-none-any.whl", hash = "sha256:04312af2c5e72449aaebac4a2776f52ef010877c554103419d3f36066fe8af4c"},
{file = "ollama-0.4.8.tar.gz", hash = "sha256:1121439d49b96fa8339842965d0616eba5deb9f8c790786cdf4c0b3df4833802"},
@@ -1995,6 +2243,7 @@ version = "1.76.2"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"},
{file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"},
@@ -2021,6 +2270,7 @@ version = "3.10.17"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "orjson-3.10.17-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bc399cf138a0201d0bf2399b44195d33a0a5aee149dab114340da0d766c88b95"},
{file = "orjson-3.10.17-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59225b27b72e0e1626d869f7b987da6c74f9b6026cf9a87c1cdaf74ca9f7b8c0"},
@@ -2102,6 +2352,7 @@ version = "1.9.1"
description = "Fast, correct Python msgpack library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "ormsgpack-1.9.1-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:f1f804fd9c0fd84213a6022c34172f82323b34afa7052a4af18797582cf56365"},
{file = "ormsgpack-1.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eab5cec99c46276b37071d570aab98603f3d0309b3818da3247eb64bb95e5cfc"},
@@ -2152,6 +2403,7 @@ version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
@@ -2163,6 +2415,7 @@ version = "2.2.3"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
{file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
@@ -2248,6 +2501,7 @@ version = "0.12.1"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
@@ -2259,6 +2513,7 @@ version = "11.2.1"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"},
{file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"},
@@ -2349,7 +2604,7 @@ fpx = ["olefile"]
mic = ["olefile"]
test-arrow = ["pyarrow"]
tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"]
-typing = ["typing-extensions"]
+typing = ["typing-extensions ; python_version < \"3.10\""]
xmp = ["defusedxml"]
[[package]]
@@ -2358,6 +2613,7 @@ version = "4.3.7"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "platformdirs-4.3.7-py3-none-any.whl", hash = "sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94"},
{file = "platformdirs-4.3.7.tar.gz", hash = "sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351"},
@@ -2374,6 +2630,7 @@ version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
@@ -2389,6 +2646,7 @@ version = "3.0.51"
description = "Library for building powerful interactive command lines in Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07"},
{file = "prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed"},
@@ -2403,6 +2661,7 @@ version = "0.3.1"
description = "Accelerated property cache"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "propcache-0.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f27785888d2fdd918bc36de8b8739f2d6c791399552333721b58193f68ea3e98"},
{file = "propcache-0.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4e89cde74154c7b5957f87a355bb9c8ec929c167b59c83d90654ea36aeb6180"},
@@ -2510,6 +2769,7 @@ version = "1.26.1"
description = "Beautiful, Pythonic protocol buffers"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66"},
{file = "proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012"},
@@ -2527,6 +2787,7 @@ version = "6.30.2"
description = ""
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "protobuf-6.30.2-cp310-abi3-win32.whl", hash = "sha256:b12ef7df7b9329886e66404bef5e9ce6a26b54069d7f7436a0853ccdeb91c103"},
{file = "protobuf-6.30.2-cp310-abi3-win_amd64.whl", hash = "sha256:7653c99774f73fe6b9301b87da52af0e69783a2e371e8b599b3e9cb4da4b12b9"},
@@ -2545,6 +2806,7 @@ version = "0.6.1"
description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"},
{file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
@@ -2556,6 +2818,7 @@ version = "0.4.2"
description = "A collection of ASN.1-based protocols modules"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a"},
{file = "pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6"},
@@ -2564,23 +2827,139 @@ files = [
[package.dependencies]
pyasn1 = ">=0.6.1,<0.7.0"
+[[package]]
+name = "pycares"
+version = "4.8.0"
+description = "Python interface for c-ares"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+ {file = "pycares-4.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f40d9f4a8de398b110fdf226cdfadd86e8c7eb71d5298120ec41cf8d94b0012f"},
+ {file = "pycares-4.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:339de06fc849a51015968038d2bbed68fc24047522404af9533f32395ca80d25"},
+ {file = "pycares-4.8.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:372a236c1502b9056b0bea195c64c329603b4efa70b593a33b7ae37fbb7fad00"},
+ {file = "pycares-4.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03f66a5e143d102ccc204bd4e29edd70bed28420f707efd2116748241e30cb73"},
+ {file = "pycares-4.8.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ef50504296cd5fc58cfd6318f82e20af24fbe2c83004f6ff16259adb13afdf14"},
+ {file = "pycares-4.8.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1bc541b627c7951dd36136b18bd185c5244a0fb2af5b1492ffb8acaceec1c5b"},
+ {file = "pycares-4.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:938d188ed6bed696099be67ebdcdf121827b9432b17a9ea9e40dc35fd9d85363"},
+ {file = "pycares-4.8.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:327837ffdc0c7adda09c98e1263c64b2aff814eea51a423f66733c75ccd9a642"},
+ {file = "pycares-4.8.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a6b9b8d08c4508c45bd39e0c74e9e7052736f18ca1d25a289365bb9ac36e5849"},
+ {file = "pycares-4.8.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:feac07d5e6d2d8f031c71237c21c21b8c995b41a1eba64560e8cf1e42ac11bc6"},
+ {file = "pycares-4.8.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5bcdbf37012fd2323ca9f2a1074421a9ccf277d772632f8f0ce8c46ec7564250"},
+ {file = "pycares-4.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e3ebb692cb43fcf34fe0d26f2cf9a0ea53fdfb136463845b81fad651277922db"},
+ {file = "pycares-4.8.0-cp310-cp310-win32.whl", hash = "sha256:d98447ec0efff3fa868ccc54dcc56e71faff498f8848ecec2004c3108efb4da2"},
+ {file = "pycares-4.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:1abb8f40917960ead3c2771277f0bdee1967393b0fdf68743c225b606787da68"},
+ {file = "pycares-4.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5e25db89005ddd8d9c5720293afe6d6dd92e682fc6bc7a632535b84511e2060d"},
+ {file = "pycares-4.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6f9665ef116e6ee216c396f5f927756c2164f9f3316aec7ff1a9a1e1e7ec9b2a"},
+ {file = "pycares-4.8.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54a96893133471f6889b577147adcc21a480dbe316f56730871028379c8313f3"},
+ {file = "pycares-4.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51024b3a69762bd3100d94986a29922be15e13f56f991aaefb41f5bcd3d7f0bb"},
+ {file = "pycares-4.8.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47ff9db50c599e4d965ae3bec99cc30941c1d2b0f078ec816680b70d052dd54a"},
+ {file = "pycares-4.8.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27ef8ff4e0f60ea6769a60d1c3d1d2aefed1d832e7bb83fc3934884e2dba5cdd"},
+ {file = "pycares-4.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63511af7a3f9663f562fbb6bfa3591a259505d976e2aba1fa2da13dde43c6ca7"},
+ {file = "pycares-4.8.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:73c3219b47616e6a5ad1810de96ed59721c7751f19b70ae7bf24997a8365408f"},
+ {file = "pycares-4.8.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:da42a45207c18f37be5e491c14b6d1063cfe1e46620eb661735d0cedc2b59099"},
+ {file = "pycares-4.8.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8a068e898bb5dd09cd654e19cd2abf20f93d0cc59d5d955135ed48ea0f806aa1"},
+ {file = "pycares-4.8.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:962aed95675bb66c0b785a2fbbd1bb58ce7f009e283e4ef5aaa4a1f2dc00d217"},
+ {file = "pycares-4.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ce8b1a16c1e4517a82a0ebd7664783a327166a3764d844cf96b1fb7b9dd1e493"},
+ {file = "pycares-4.8.0-cp311-cp311-win32.whl", hash = "sha256:b3749ddbcbd216376c3b53d42d8b640b457133f1a12b0e003f3838f953037ae7"},
+ {file = "pycares-4.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:5ce8a4e1b485b2360ab666c4ea1db97f57ede345a3b566d80bfa52b17e616610"},
+ {file = "pycares-4.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3273e01a75308ed06d2492d83c7ba476e579a60a24d9f20fe178ce5e9d8d028b"},
+ {file = "pycares-4.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fcedaadea1f452911fd29935749f98d144dae758d6003b7e9b6c5d5bd47d1dff"},
+ {file = "pycares-4.8.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aae6cb33e287e06a4aabcbc57626df682c9a4fa8026207f5b498697f1c2fb562"},
+ {file = "pycares-4.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25038b930e5be82839503fb171385b2aefd6d541bc5b7da0938bdb67780467d2"},
+ {file = "pycares-4.8.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cc8499b6e7dfbe4af65f6938db710ce9acd1debf34af2cbb93b898b1e5da6a5a"},
+ {file = "pycares-4.8.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c4e1c6a68ef56a7622f6176d9946d4e51f3c853327a0123ef35a5380230c84cd"},
+ {file = "pycares-4.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7cc8c3c9114b9c84e4062d25ca9b4bddc80a65d0b074c7cb059275273382f89"},
+ {file = "pycares-4.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4404014069d3e362abf404c9932d4335bb9c07ba834cfe7d683c725b92e0f9da"},
+ {file = "pycares-4.8.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ee0a58c32ec2a352cef0e1d20335a7caf9871cd79b73be2ca2896fe70f09c9d7"},
+ {file = "pycares-4.8.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:35f32f52b486b8fede3cbebf088f30b01242d0321b5216887c28e80490595302"},
+ {file = "pycares-4.8.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ecbb506e27a3b3a2abc001c77beeccf265475c84b98629a6b3e61bd9f2987eaa"},
+ {file = "pycares-4.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9392b2a34adbf60cb9e38f4a0d363413ecea8d835b5a475122f50f76676d59dd"},
+ {file = "pycares-4.8.0-cp312-cp312-win32.whl", hash = "sha256:f0fbefe68403ffcff19c869b8d621c88a6d2cef18d53cf0dab0fa9458a6ca712"},
+ {file = "pycares-4.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa8aab6085a2ddfb1b43a06ddf1b498347117bb47cd620d9b12c43383c9c2737"},
+ {file = "pycares-4.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:358a9a2c6fed59f62788e63d88669224955443048a1602016d4358e92aedb365"},
+ {file = "pycares-4.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e3e1278967fa8d4a0056be3fcc8fc551b8bad1fc7d0e5172196dccb8ddb036a"},
+ {file = "pycares-4.8.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79befb773e370a8f97de9f16f5ea2c7e7fa0e3c6c74fbea6d332bf58164d7d06"},
+ {file = "pycares-4.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b00d3695db64ce98a34e632e1d53f5a1cdb25451489f227bec2a6c03ff87ee8"},
+ {file = "pycares-4.8.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:37bdc4f2ff0612d60fc4f7547e12ff02cdcaa9a9e42e827bb64d4748994719f1"},
+ {file = "pycares-4.8.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd92c44498ec7a6139888b464b28c49f7ba975933689bd67ea8d572b94188404"},
+ {file = "pycares-4.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2665a0d810e2bbc41e97f3c3e5ea7950f666b3aa19c5f6c99d6b018ccd2e0052"},
+ {file = "pycares-4.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45a629a6470a33478514c566bce50c63f1b17d1c5f2f964c9a6790330dc105fb"},
+ {file = "pycares-4.8.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:47bb378f1773f41cca8e31dcdf009ce4a9b8aff8a30c7267aaff9a099c407ba5"},
+ {file = "pycares-4.8.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fb3feae38458005cc101956e38f16eb3145fff8cd793e35cd4bdef6bf1aa2623"},
+ {file = "pycares-4.8.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:14bc28aeaa66b0f4331ac94455e8043c8a06b3faafd78cc49d4b677bae0d0b08"},
+ {file = "pycares-4.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:62c82b871470f2864a1febf7b96bb1d108ce9063e6d3d43727e8a46f0028a456"},
+ {file = "pycares-4.8.0-cp313-cp313-win32.whl", hash = "sha256:01afa8964c698c8f548b46d726f766aa7817b2d4386735af1f7996903d724920"},
+ {file = "pycares-4.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:22f86f81b12ab17b0a7bd0da1e27938caaed11715225c1168763af97f8bb51a7"},
+ {file = "pycares-4.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:61325d13a95255e858f42a7a1a9e482ff47ef2233f95ad9a4f308a3bd8ecf903"},
+ {file = "pycares-4.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dfec3a7d42336fa46a1e7e07f67000fd4b97860598c59a894c08f81378629e4e"},
+ {file = "pycares-4.8.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b65067e4b4f5345688817fff6be06b9b1f4ec3619b0b9ecc639bc681b73f646b"},
+ {file = "pycares-4.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0322ad94bbaa7016139b5bbdcd0de6f6feb9d146d69e03a82aaca342e06830a6"},
+ {file = "pycares-4.8.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:456c60f170c997f9a43c7afa1085fced8efb7e13ae49dd5656f998ae13c4bdb4"},
+ {file = "pycares-4.8.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57a2c4c9ce423a85b0e0227409dbaf0d478f5e0c31d9e626768e77e1e887d32f"},
+ {file = "pycares-4.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:478d9c479108b7527266864c0affe3d6e863492c9bc269217e36100c8fd89b91"},
+ {file = "pycares-4.8.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aed56bca096990ca0aa9bbf95761fc87e02880e04b0845922b5c12ea9abe523f"},
+ {file = "pycares-4.8.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ef265a390928ee2f77f8901c2273c53293157860451ad453ce7f45dd268b72f9"},
+ {file = "pycares-4.8.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a5f17d7a76d8335f1c90a8530c8f1e8bb22e9a1d70a96f686efaed946de1c908"},
+ {file = "pycares-4.8.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:891f981feb2ef34367378f813fc17b3d706ce95b6548eeea0c9fe7705d7e54b1"},
+ {file = "pycares-4.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4102f6d9117466cc0a1f527907a1454d109cc9e8551b8074888071ef16050fe3"},
+ {file = "pycares-4.8.0-cp39-cp39-win32.whl", hash = "sha256:d6775308659652adc88c82c53eda59b5e86a154aaba5ad1e287bbb3e0be77076"},
+ {file = "pycares-4.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:8bc05462aa44788d48544cca3d2532466fed2cdc5a2f24a43a92b620a61c9d19"},
+ {file = "pycares-4.8.0.tar.gz", hash = "sha256:2fc2ebfab960f654b3e3cf08a732486950da99393a657f8b44618ad3ed2d39c1"},
+]
+
+[package.dependencies]
+cffi = ">=1.5.0"
+
+[package.extras]
+idna = ["idna (>=2.1)"]
+
[[package]]
name = "pycodestyle"
version = "2.11.1"
description = "Python style guide checker"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"},
{file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"},
]
+[[package]]
+name = "pycoingecko"
+version = "3.2.0"
+description = "Python wrapper around the CoinGecko API"
+optional = false
+python-versions = "*"
+groups = ["main"]
+files = [
+ {file = "pycoingecko-3.2.0-py3-none-any.whl", hash = "sha256:b4fec16e9420b3e2f1ae38ea95710b631c9809cf1bffbf0021cc6608e357925b"},
+ {file = "pycoingecko-3.2.0.tar.gz", hash = "sha256:c48935d6e608efd9a798cbc4be30f1b43176cb680503dfef8f9352e038fc7605"},
+]
+
+[package.dependencies]
+requests = "*"
+
+[[package]]
+name = "pycparser"
+version = "2.22"
+description = "C parser in Python"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
+ {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
+]
+
[[package]]
name = "pydantic"
version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
{file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
@@ -2594,7 +2973,7 @@ typing-inspection = ">=0.4.0"
[package.extras]
email = ["email-validator (>=2.0.0)"]
-timezone = ["tzdata"]
+timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
[[package]]
name = "pydantic-core"
@@ -2602,6 +2981,7 @@ version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
@@ -2713,6 +3093,7 @@ version = "3.1.0"
description = "passive checker of Python programs"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pyflakes-3.1.0-py2.py3-none-any.whl", hash = "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774"},
{file = "pyflakes-3.1.0.tar.gz", hash = "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"},
@@ -2724,6 +3105,7 @@ version = "2.19.1"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"},
{file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"},
@@ -2738,6 +3120,7 @@ version = "3.2.3"
description = "pyparsing module - Classes and methods to define and execute parsing grammars"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf"},
{file = "pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be"},
@@ -2752,6 +3135,7 @@ version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -2772,6 +3156,7 @@ version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["main"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -2786,6 +3171,7 @@ version = "1.0.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"},
{file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"},
@@ -2800,6 +3186,7 @@ version = "2025.2"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
+groups = ["main"]
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -2811,6 +3198,7 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -2873,6 +3261,7 @@ version = "2.1.0"
description = "Python library to build pretty command line user prompts ⭐️"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "questionary-2.1.0-py3-none-any.whl", hash = "sha256:44174d237b68bc828e4878c763a9ad6790ee61990e0ae72927694ead57bab8ec"},
{file = "questionary-2.1.0.tar.gz", hash = "sha256:6302cdd645b19667d8f6e6634774e9538bfcd1aad9be287e743d96cacaf95587"},
@@ -2887,6 +3276,7 @@ version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
@@ -2990,6 +3380,7 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -3011,6 +3402,7 @@ version = "1.0.0"
description = "A utility belt for advanced users of python-requests"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+groups = ["main"]
files = [
{file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
{file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
@@ -3025,6 +3417,7 @@ version = "13.9.4"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
optional = false
python-versions = ">=3.8.0"
+groups = ["main"]
files = [
{file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"},
{file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"},
@@ -3043,6 +3436,7 @@ version = "0.14.4"
description = "Rich toolkit for building command-line applications"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "rich_toolkit-0.14.4-py3-none-any.whl", hash = "sha256:cc71ebee83eaa122d8e42882408bc5a4bf0240bbf1e368811ee56d249b3d742a"},
{file = "rich_toolkit-0.14.4.tar.gz", hash = "sha256:db256cf45165cae381c9bbf3b48a0fd4d99a07c80155cc655c80212a62e28fe1"},
@@ -3059,6 +3453,7 @@ version = "4.9.1"
description = "Pure-Python RSA implementation"
optional = false
python-versions = "<4,>=3.6"
+groups = ["main"]
files = [
{file = "rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762"},
{file = "rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75"},
@@ -3067,12 +3462,34 @@ files = [
[package.dependencies]
pyasn1 = ">=0.1.3"
+[[package]]
+name = "setuptools"
+version = "80.9.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+ {file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"},
+ {file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"},
+]
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""]
+core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"]
+
[[package]]
name = "shellingham"
version = "1.5.4"
description = "Tool to Detect Surrounding Shell"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"},
{file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"},
@@ -3084,6 +3501,7 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["main"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -3095,6 +3513,7 @@ version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -3106,6 +3525,7 @@ version = "2.0.40"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "SQLAlchemy-2.0.40-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ae9597cab738e7cc823f04a704fb754a9249f0b6695a6aeb63b74055cd417a96"},
{file = "SQLAlchemy-2.0.40-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a5c21ab099a83d669ebb251fddf8f5cee4d75ea40a5a1653d9c43d60e20867"},
@@ -3201,6 +3621,7 @@ version = "0.27.0"
description = "The little ASGI library that shines."
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
{file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
@@ -3218,6 +3639,7 @@ version = "0.9.0"
description = "Pretty-print tabular data"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
{file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
@@ -3232,6 +3654,7 @@ version = "8.5.0"
description = "Retry code until it succeeds"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"},
{file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"},
@@ -3247,6 +3670,7 @@ version = "0.9.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
@@ -3294,6 +3718,7 @@ version = "4.67.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
{file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -3315,6 +3740,7 @@ version = "0.15.3"
description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "typer-0.15.3-py3-none-any.whl", hash = "sha256:c86a65ad77ca531f03de08d1b9cb67cd09ad02ddddf4b34745b5008f43b239bd"},
{file = "typer-0.15.3.tar.gz", hash = "sha256:818873625d0569653438316567861899f7e9972f2e6e0c16dab608345ced713c"},
@@ -3332,6 +3758,7 @@ version = "4.13.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"},
{file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"},
@@ -3343,6 +3770,7 @@ version = "0.4.0"
description = "Runtime typing introspection tools"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
{file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
@@ -3357,6 +3785,7 @@ version = "2025.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
+groups = ["main"]
files = [
{file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
{file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
@@ -3368,13 +3797,14 @@ version = "2.4.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"},
{file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -3385,6 +3815,7 @@ version = "0.34.2"
description = "The lightning-fast ASGI server."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"},
{file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"},
@@ -3397,12 +3828,12 @@ h11 = ">=0.8"
httptools = {version = ">=0.6.3", optional = true, markers = "extra == \"standard\""}
python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""}
-uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
+uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""}
[package.extras]
-standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
+standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"]
[[package]]
name = "uvloop"
@@ -3410,6 +3841,8 @@ version = "0.21.0"
description = "Fast implementation of asyncio event loop on top of libuv"
optional = false
python-versions = ">=3.8.0"
+groups = ["main"]
+markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\""
files = [
{file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"},
{file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"},
@@ -3461,6 +3894,7 @@ version = "1.0.5"
description = "Simple, modern and high performance file watching and code reload in python."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "watchfiles-1.0.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5c40fe7dd9e5f81e0847b1ea64e1f5dd79dd61afbedb57759df06767ac719b40"},
{file = "watchfiles-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c0db396e6003d99bb2d7232c957b5f0b5634bbd1b24e381a5afcc880f7373fb"},
@@ -3544,6 +3978,7 @@ version = "0.2.13"
description = "Measures the displayed width of unicode strings in a terminal"
optional = false
python-versions = "*"
+groups = ["main"]
files = [
{file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"},
{file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"},
@@ -3555,6 +3990,7 @@ version = "15.0.1"
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"},
{file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"},
@@ -3633,6 +4069,7 @@ version = "1.20.0"
description = "Yet another URL library"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "yarl-1.20.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f1f6670b9ae3daedb325fa55fbe31c22c8228f6e0b513772c2e1c623caa6ab22"},
{file = "yarl-1.20.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85a231fa250dfa3308f3c7896cc007a47bc76e9e8e8595c20b7426cac4884c62"},
@@ -3746,6 +4183,6 @@ multidict = ">=4.0"
propcache = ">=0.2.1"
[metadata]
-lock-version = "2.0"
+lock-version = "2.1"
python-versions = "^3.11"
-content-hash = "4b63c0cbfd37b7262e6b1364ae4e7ce6c8b410933f3f8e62f12af5ed9646203e"
+content-hash = "1d76bc97b9beabddbb2dcc6f705f3b85a4f3ca1fe8b7fc5b54ccfd11d1b2b305"
diff --git a/pyproject.toml b/pyproject.toml
index 1483d487b..684d8bf2f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,6 +27,9 @@ colorama = "^0.4.6"
questionary = "^2.1.0"
rich = "^13.9.4"
langchain-google-genai = "^2.0.11"
+# Crypto dependencies
+ccxt = "^4.2.69"
+pycoingecko = "^3.1.0"
# Backend dependencies
fastapi = {extras = ["standard"], version = "^0.104.0"}
fastapi-cli = "^0.0.7"
diff --git a/src/__init__.py b/src/__init__.py
index e69de29bb..c0df8b86f 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -0,0 +1,3 @@
+from .config import ALLOW_MARGIN, IS_CRYPTO
+
+__all__ = ["IS_CRYPTO", "ALLOW_MARGIN"]
diff --git a/src/agents/aswath_damodaran.py b/src/agents/aswath_damodaran.py
index 2f2a1ecad..19d4cd731 100644
--- a/src/agents/aswath_damodaran.py
+++ b/src/agents/aswath_damodaran.py
@@ -1,13 +1,14 @@
from __future__ import annotations
import json
-from typing_extensions import Literal
-from pydantic import BaseModel
+from typing import Any
-from src.graph.state import AgentState, show_agent_reasoning
-from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
+from pydantic import BaseModel
+from typing_extensions import Literal
+from src.graph.state import AgentState, show_agent_reasoning
from src.tools.api import (
get_financial_metrics,
get_market_cap,
@@ -19,7 +20,7 @@
class AswathDamodaranSignal(BaseModel):
signal: Literal["bullish", "bearish", "neutral"]
- confidence: float # 0‒100
+ confidence: float # 0‒100
reasoning: str
@@ -32,9 +33,9 @@ def aswath_damodaran_agent(state: AgentState):
• Cross‑check with relative valuation (PE vs. Fwd PE sector median proxy)
Produces a trading signal and explanation in Damodaran’s analytical voice.
"""
- data = state["data"]
- end_date = data["end_date"]
- tickers = data["tickers"]
+ data = state["data"]
+ end_date = data["end_date"]
+ tickers = data["tickers"]
analysis_data: dict[str, dict] = {}
damodaran_signals: dict[str, dict] = {}
@@ -77,17 +78,11 @@ def aswath_damodaran_agent(state: AgentState):
relative_val_analysis = analyze_relative_valuation(metrics)
# ─── Score & margin of safety ──────────────────────────────────────────
- total_score = (
- growth_analysis["score"]
- + risk_analysis["score"]
- + relative_val_analysis["score"]
- )
+ total_score = growth_analysis["score"] + risk_analysis["score"] + relative_val_analysis["score"]
max_score = growth_analysis["max_score"] + risk_analysis["max_score"] + relative_val_analysis["max_score"]
intrinsic_value = intrinsic_val_analysis["intrinsic_value"]
- margin_of_safety = (
- (intrinsic_value - market_cap) / market_cap if intrinsic_value and market_cap else None
- )
+ margin_of_safety = (intrinsic_value - market_cap) / market_cap if intrinsic_value and market_cap else None
# Decision rules (Damodaran tends to act with ~20‑25 % MOS)
if margin_of_safety is not None and margin_of_safety >= 0.25:
@@ -139,7 +134,7 @@ def aswath_damodaran_agent(state: AgentState):
# ────────────────────────────────────────────────────────────────────────────────
# Helper analyses
# ────────────────────────────────────────────────────────────────────────────────
-def analyze_growth_and_reinvestment(metrics: list, line_items: list) -> dict[str, any]:
+def analyze_growth_and_reinvestment(metrics: list, line_items: list) -> dict[str, Any]:
"""
Growth score (0‑4):
+2 5‑yr CAGR of revenue > 8 %
@@ -189,7 +184,7 @@ def analyze_growth_and_reinvestment(metrics: list, line_items: list) -> dict[str
return {"score": score, "max_score": max_score, "details": "; ".join(details), "metrics": latest.model_dump()}
-def analyze_risk_profile(metrics: list, line_items: list) -> dict[str, any]:
+def analyze_risk_profile(metrics: list, line_items: list) -> dict[str, Any]:
"""
Risk score (0‑3):
+1 Beta < 1.3
@@ -250,7 +245,7 @@ def analyze_risk_profile(metrics: list, line_items: list) -> dict[str, any]:
}
-def analyze_relative_valuation(metrics: list) -> dict[str, any]:
+def analyze_relative_valuation(metrics: list) -> dict[str, Any]:
"""
Simple PE check vs. historical median (proxy since sector comps unavailable):
+1 if TTM P/E < 70 % of 5‑yr median
@@ -281,7 +276,7 @@ def analyze_relative_valuation(metrics: list) -> dict[str, any]:
# ────────────────────────────────────────────────────────────────────────────────
# Intrinsic value via FCFF DCF (Damodaran style)
# ────────────────────────────────────────────────────────────────────────────────
-def calculate_intrinsic_value_dcf(metrics: list, line_items: list, risk_analysis: dict) -> dict[str, any]:
+def calculate_intrinsic_value_dcf(metrics: list, line_items: list, risk_analysis: dict) -> dict[str, Any]:
"""
FCFF DCF with:
• Base FCFF = latest free cash flow
@@ -322,12 +317,7 @@ def calculate_intrinsic_value_dcf(metrics: list, line_items: list, risk_analysis
g += g_step
# Terminal value (perpetuity with terminal growth)
- tv = (
- fcff0
- * (1 + terminal_growth)
- / (discount - terminal_growth)
- / (1 + discount) ** years
- )
+ tv = fcff0 * (1 + terminal_growth) / (discount - terminal_growth) / (1 + discount) ** years
equity_value = pv_sum + tv
intrinsic_per_share = equity_value / shares
@@ -348,8 +338,8 @@ def calculate_intrinsic_value_dcf(metrics: list, line_items: list, risk_analysis
def estimate_cost_of_equity(beta: float | None) -> float:
"""CAPM: r_e = r_f + β × ERP (use Damodaran’s long‑term averages)."""
- risk_free = 0.04 # 10‑yr US Treasury proxy
- erp = 0.05 # long‑run US equity risk premium
+ risk_free = 0.04 # 10‑yr US Treasury proxy
+ erp = 0.05 # long‑run US equity risk premium
beta = beta if beta is not None else 1.0
return risk_free + beta * erp
@@ -359,7 +349,7 @@ def estimate_cost_of_equity(beta: float | None) -> float:
# ────────────────────────────────────────────────────────────────────────────────
def generate_damodaran_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> AswathDamodaranSignal:
diff --git a/src/agents/ben_graham.py b/src/agents/ben_graham.py
index 304d04c5a..f2d2dadba 100644
--- a/src/agents/ben_graham.py
+++ b/src/agents/ben_graham.py
@@ -1,13 +1,16 @@
-from src.graph.state import AgentState, show_agent_reasoning
-from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
-from langchain_core.prompts import ChatPromptTemplate
+import json
+import math
+from typing import Any
+
from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel
-import json
from typing_extensions import Literal
-from src.utils.progress import progress
+
+from src.graph.state import AgentState, show_agent_reasoning
+from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
from src.utils.llm import call_llm
-import math
+from src.utils.progress import progress
class BenGrahamSignal(BaseModel):
@@ -279,7 +282,7 @@ def analyze_valuation_graham(financial_line_items: list, market_cap: float) -> d
def generate_graham_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> BenGrahamSignal:
diff --git a/src/agents/bill_ackman.py b/src/agents/bill_ackman.py
index 198909ece..8ab09e9bb 100644
--- a/src/agents/bill_ackman.py
+++ b/src/agents/bill_ackman.py
@@ -1,13 +1,16 @@
-from langchain_openai import ChatOpenAI
-from src.graph.state import AgentState, show_agent_reasoning
-from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
-from langchain_core.prompts import ChatPromptTemplate
+import json
+from typing import Any
+
from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_openai import ChatOpenAI
from pydantic import BaseModel
-import json
from typing_extensions import Literal
-from src.utils.progress import progress
+
+from src.graph.state import AgentState, show_agent_reasoning
+from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
from src.utils.llm import call_llm
+from src.utils.progress import progress
class BillAckmanSignal(BaseModel):
@@ -25,14 +28,14 @@ def bill_ackman_agent(state: AgentState):
data = state["data"]
end_date = data["end_date"]
tickers = data["tickers"]
-
+
analysis_data = {}
ackman_analysis = {}
-
+
for ticker in tickers:
progress.update_status("bill_ackman_agent", ticker, "Fetching financial metrics")
metrics = get_financial_metrics(ticker, end_date, period="annual", limit=5)
-
+
progress.update_status("bill_ackman_agent", ticker, "Gathering financial line items")
# Request multiple periods of data (annual or TTM) for a more robust long-term view.
financial_line_items = search_line_items(
@@ -51,33 +54,28 @@ def bill_ackman_agent(state: AgentState):
],
end_date,
period="annual",
- limit=5
+ limit=5,
)
-
+
progress.update_status("bill_ackman_agent", ticker, "Getting market cap")
market_cap = get_market_cap(ticker, end_date)
-
+
progress.update_status("bill_ackman_agent", ticker, "Analyzing business quality")
quality_analysis = analyze_business_quality(metrics, financial_line_items)
-
+
progress.update_status("bill_ackman_agent", ticker, "Analyzing balance sheet and capital structure")
balance_sheet_analysis = analyze_financial_discipline(metrics, financial_line_items)
-
+
progress.update_status("bill_ackman_agent", ticker, "Analyzing activism potential")
activism_analysis = analyze_activism_potential(financial_line_items)
-
+
progress.update_status("bill_ackman_agent", ticker, "Calculating intrinsic value & margin of safety")
valuation_analysis = analyze_valuation(financial_line_items, market_cap)
-
+
# Combine partial scores or signals
- total_score = (
- quality_analysis["score"]
- + balance_sheet_analysis["score"]
- + activism_analysis["score"]
- + valuation_analysis["score"]
- )
+ total_score = quality_analysis["score"] + balance_sheet_analysis["score"] + activism_analysis["score"] + valuation_analysis["score"]
max_possible_score = 20 # Adjust weighting as desired (5 from each sub-analysis, for instance)
-
+
# Generate a simple buy/hold/sell (bullish/neutral/bearish) signal
if total_score >= 0.7 * max_possible_score:
signal = "bullish"
@@ -85,52 +83,34 @@ def bill_ackman_agent(state: AgentState):
signal = "bearish"
else:
signal = "neutral"
-
- analysis_data[ticker] = {
- "signal": signal,
- "score": total_score,
- "max_score": max_possible_score,
- "quality_analysis": quality_analysis,
- "balance_sheet_analysis": balance_sheet_analysis,
- "activism_analysis": activism_analysis,
- "valuation_analysis": valuation_analysis
- }
-
+
+ analysis_data[ticker] = {"signal": signal, "score": total_score, "max_score": max_possible_score, "quality_analysis": quality_analysis, "balance_sheet_analysis": balance_sheet_analysis, "activism_analysis": activism_analysis, "valuation_analysis": valuation_analysis}
+
progress.update_status("bill_ackman_agent", ticker, "Generating Bill Ackman analysis")
ackman_output = generate_ackman_output(
- ticker=ticker,
+ ticker=ticker,
analysis_data=analysis_data,
model_name=state["metadata"]["model_name"],
model_provider=state["metadata"]["model_provider"],
)
-
- ackman_analysis[ticker] = {
- "signal": ackman_output.signal,
- "confidence": ackman_output.confidence,
- "reasoning": ackman_output.reasoning
- }
-
+
+ ackman_analysis[ticker] = {"signal": ackman_output.signal, "confidence": ackman_output.confidence, "reasoning": ackman_output.reasoning}
+
progress.update_status("bill_ackman_agent", ticker, "Done", analysis=ackman_output.reasoning)
-
+
# Wrap results in a single message for the chain
- message = HumanMessage(
- content=json.dumps(ackman_analysis),
- name="bill_ackman_agent"
- )
-
+ message = HumanMessage(content=json.dumps(ackman_analysis), name="bill_ackman_agent")
+
# Show reasoning if requested
if state["metadata"]["show_reasoning"]:
show_agent_reasoning(ackman_analysis, "Bill Ackman Agent")
-
+
# Add signals to the overall state
state["data"]["analyst_signals"]["bill_ackman_agent"] = ackman_analysis
progress.update_status("bill_ackman_agent", None, "Done")
- return {
- "messages": [message],
- "data": state["data"]
- }
+ return {"messages": [message], "data": state["data"]}
def analyze_business_quality(metrics: list, financial_line_items: list) -> dict:
@@ -141,13 +121,10 @@ def analyze_business_quality(metrics: list, financial_line_items: list) -> dict:
"""
score = 0
details = []
-
+
if not metrics or not financial_line_items:
- return {
- "score": 0,
- "details": "Insufficient data to analyze business quality"
- }
-
+ return {"score": 0, "details": "Insufficient data to analyze business quality"}
+
# 1. Multi-period revenue growth analysis
revenues = [item.revenue for item in financial_line_items if item.revenue is not None]
if len(revenues) >= 2:
@@ -164,11 +141,11 @@ def analyze_business_quality(metrics: list, financial_line_items: list) -> dict:
details.append("Revenue did not grow significantly or data insufficient.")
else:
details.append("Not enough revenue data for multi-period trend.")
-
+
# 2. Operating margin and free cash flow consistency
fcf_vals = [item.free_cash_flow for item in financial_line_items if item.free_cash_flow is not None]
op_margin_vals = [item.operating_margin for item in financial_line_items if item.operating_margin is not None]
-
+
if op_margin_vals:
above_15 = sum(1 for m in op_margin_vals if m > 0.15)
if above_15 >= (len(op_margin_vals) // 2 + 1):
@@ -178,7 +155,7 @@ def analyze_business_quality(metrics: list, financial_line_items: list) -> dict:
details.append("Operating margin not consistently above 15%.")
else:
details.append("No operating margin data across periods.")
-
+
if fcf_vals:
positive_fcf_count = sum(1 for f in fcf_vals if f > 0)
if positive_fcf_count >= (len(fcf_vals) // 2 + 1):
@@ -188,7 +165,7 @@ def analyze_business_quality(metrics: list, financial_line_items: list) -> dict:
details.append("Free cash flow not consistently positive.")
else:
details.append("No free cash flow data across periods.")
-
+
# 3. Return on Equity (ROE) check from the latest metrics
latest_metrics = metrics[0]
if latest_metrics.return_on_equity and latest_metrics.return_on_equity > 0.15:
@@ -198,17 +175,14 @@ def analyze_business_quality(metrics: list, financial_line_items: list) -> dict:
details.append(f"ROE of {latest_metrics.return_on_equity:.1%} is moderate.")
else:
details.append("ROE data not available.")
-
+
# 4. (Optional) Brand Intangible (if intangible_assets are fetched)
# intangible_vals = [item.intangible_assets for item in financial_line_items if item.intangible_assets]
# if intangible_vals and sum(intangible_vals) > 0:
# details.append("Significant intangible assets may indicate brand value or proprietary tech.")
# score += 1
-
- return {
- "score": score,
- "details": "; ".join(details)
- }
+
+ return {"score": score, "details": "; ".join(details)}
def analyze_financial_discipline(metrics: list, financial_line_items: list) -> dict:
@@ -219,13 +193,10 @@ def analyze_financial_discipline(metrics: list, financial_line_items: list) -> d
"""
score = 0
details = []
-
+
if not metrics or not financial_line_items:
- return {
- "score": 0,
- "details": "Insufficient data to analyze financial discipline"
- }
-
+ return {"score": 0, "details": "Insufficient data to analyze financial discipline"}
+
# 1. Multi-period debt ratio or debt_to_equity
debt_to_equity_vals = [item.debt_to_equity for item in financial_line_items if item.debt_to_equity is not None]
if debt_to_equity_vals:
@@ -241,7 +212,7 @@ def analyze_financial_discipline(metrics: list, financial_line_items: list) -> d
for item in financial_line_items:
if item.total_liabilities and item.total_assets and item.total_assets > 0:
liab_to_assets.append(item.total_liabilities / item.total_assets)
-
+
if liab_to_assets:
below_50pct_count = sum(1 for ratio in liab_to_assets if ratio < 0.5)
if below_50pct_count >= (len(liab_to_assets) // 2 + 1):
@@ -251,13 +222,9 @@ def analyze_financial_discipline(metrics: list, financial_line_items: list) -> d
details.append("Liabilities-to-assets >= 50% in many periods.")
else:
details.append("No consistent leverage ratio data available.")
-
+
# 2. Capital allocation approach (dividends + share counts)
- dividends_list = [
- item.dividends_and_other_cash_distributions
- for item in financial_line_items
- if item.dividends_and_other_cash_distributions is not None
- ]
+ dividends_list = [item.dividends_and_other_cash_distributions for item in financial_line_items if item.dividends_and_other_cash_distributions is not None]
if dividends_list:
paying_dividends_count = sum(1 for d in dividends_list if d < 0)
if paying_dividends_count >= (len(dividends_list) // 2 + 1):
@@ -267,7 +234,7 @@ def analyze_financial_discipline(metrics: list, financial_line_items: list) -> d
details.append("Dividends not consistently paid or no data on distributions.")
else:
details.append("No dividend data found across periods.")
-
+
# Check for decreasing share count (simple approach)
shares = [item.outstanding_shares for item in financial_line_items if item.outstanding_shares is not None]
if len(shares) >= 2:
@@ -279,55 +246,43 @@ def analyze_financial_discipline(metrics: list, financial_line_items: list) -> d
details.append("Outstanding shares have not decreased over the available periods.")
else:
details.append("No multi-period share count data to assess buybacks.")
-
- return {
- "score": score,
- "details": "; ".join(details)
- }
+
+ return {"score": score, "details": "; ".join(details)}
def analyze_activism_potential(financial_line_items: list) -> dict:
"""
Bill Ackman often engages in activism if a company has a decent brand or moat
but is underperforming operationally.
-
+
We'll do a simplified approach:
- Look for positive revenue trends but subpar margins
- That may indicate 'activism upside' if operational improvements could unlock value.
"""
if not financial_line_items:
- return {
- "score": 0,
- "details": "Insufficient data for activism potential"
- }
-
+ return {"score": 0, "details": "Insufficient data for activism potential"}
+
# Check revenue growth vs. operating margin
revenues = [item.revenue for item in financial_line_items if item.revenue is not None]
op_margins = [item.operating_margin for item in financial_line_items if item.operating_margin is not None]
-
+
if len(revenues) < 2 or not op_margins:
- return {
- "score": 0,
- "details": "Not enough data to assess activism potential (need multi-year revenue + margins)."
- }
-
+ return {"score": 0, "details": "Not enough data to assess activism potential (need multi-year revenue + margins)."}
+
initial, final = revenues[-1], revenues[0]
revenue_growth = (final - initial) / abs(initial) if initial else 0
avg_margin = sum(op_margins) / len(op_margins)
-
+
score = 0
details = []
-
+
# Suppose if there's decent revenue growth but margins are below 10%, Ackman might see activism potential.
if revenue_growth > 0.15 and avg_margin < 0.10:
score += 2
- details.append(
- f"Revenue growth is healthy (~{revenue_growth*100:.1f}%), but margins are low (avg {avg_margin*100:.1f}%). "
- "Activism could unlock margin improvements."
- )
+ details.append(f"Revenue growth is healthy (~{revenue_growth*100:.1f}%), but margins are low (avg {avg_margin*100:.1f}%). " "Activism could unlock margin improvements.")
else:
details.append("No clear sign of activism opportunity (either margins are already decent or growth is weak).")
-
+
return {"score": score, "details": "; ".join(details)}
@@ -337,79 +292,62 @@ def analyze_valuation(financial_line_items: list, market_cap: float) -> dict:
Uses a simplified DCF with FCF as a proxy, plus margin of safety analysis.
"""
if not financial_line_items or market_cap is None:
- return {
- "score": 0,
- "details": "Insufficient data to perform valuation"
- }
-
+ return {"score": 0, "details": "Insufficient data to perform valuation"}
+
# Since financial_line_items are in descending order (newest first),
# the most recent period is the first element
latest = financial_line_items[0]
fcf = latest.free_cash_flow if latest.free_cash_flow else 0
-
+
if fcf <= 0:
- return {
- "score": 0,
- "details": f"No positive FCF for valuation; FCF = {fcf}",
- "intrinsic_value": None
- }
-
+ return {"score": 0, "details": f"No positive FCF for valuation; FCF = {fcf}", "intrinsic_value": None}
+
# Basic DCF assumptions
growth_rate = 0.06
discount_rate = 0.10
terminal_multiple = 15
projection_years = 5
-
+
present_value = 0
for year in range(1, projection_years + 1):
future_fcf = fcf * (1 + growth_rate) ** year
pv = future_fcf / ((1 + discount_rate) ** year)
present_value += pv
-
+
# Terminal Value
- terminal_value = (
- fcf * (1 + growth_rate) ** projection_years * terminal_multiple
- ) / ((1 + discount_rate) ** projection_years)
-
+ terminal_value = (fcf * (1 + growth_rate) ** projection_years * terminal_multiple) / ((1 + discount_rate) ** projection_years)
+
intrinsic_value = present_value + terminal_value
margin_of_safety = (intrinsic_value - market_cap) / market_cap
-
+
score = 0
# Simple scoring
if margin_of_safety > 0.3:
score += 3
elif margin_of_safety > 0.1:
score += 1
-
- details = [
- f"Calculated intrinsic value: ~{intrinsic_value:,.2f}",
- f"Market cap: ~{market_cap:,.2f}",
- f"Margin of safety: {margin_of_safety:.2%}"
- ]
-
- return {
- "score": score,
- "details": "; ".join(details),
- "intrinsic_value": intrinsic_value,
- "margin_of_safety": margin_of_safety
- }
+
+ details = [f"Calculated intrinsic value: ~{intrinsic_value:,.2f}", f"Market cap: ~{market_cap:,.2f}", f"Margin of safety: {margin_of_safety:.2%}"]
+
+ return {"score": score, "details": "; ".join(details), "intrinsic_value": intrinsic_value, "margin_of_safety": margin_of_safety}
def generate_ackman_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> BillAckmanSignal:
"""
Generates investment decisions in the style of Bill Ackman.
- Includes more explicit references to brand strength, activism potential,
+ Includes more explicit references to brand strength, activism potential,
catalysts, and management changes in the system prompt.
"""
- template = ChatPromptTemplate.from_messages([
- (
- "system",
- """You are a Bill Ackman AI agent, making investment decisions using his principles:
+ template = ChatPromptTemplate.from_messages(
+ [
+ (
+ "system",
+ """You are a Bill Ackman AI agent, making investment decisions using his principles:
1. Seek high-quality businesses with durable competitive advantages (moats), often in well-known consumer or service brands.
2. Prioritize consistent free cash flow and growth potential over the long term.
@@ -427,11 +365,11 @@ def generate_ackman_output(
- Use a confident, analytic, and sometimes confrontational tone when discussing weaknesses or opportunities.
Return your final recommendation (signal: bullish, neutral, or bearish) with a 0-100 confidence and a thorough reasoning section.
- """
- ),
- (
- "human",
- """Based on the following analysis, create an Ackman-style investment signal.
+ """,
+ ),
+ (
+ "human",
+ """Based on the following analysis, create an Ackman-style investment signal.
Analysis Data for {ticker}:
{analysis_data}
@@ -442,27 +380,21 @@ def generate_ackman_output(
"confidence": float (0-100),
"reasoning": "string"
}}
- """
- )
- ])
+ """,
+ ),
+ ]
+ )
- prompt = template.invoke({
- "analysis_data": json.dumps(analysis_data, indent=2),
- "ticker": ticker
- })
+ prompt = template.invoke({"analysis_data": json.dumps(analysis_data, indent=2), "ticker": ticker})
def create_default_bill_ackman_signal():
- return BillAckmanSignal(
- signal="neutral",
- confidence=0.0,
- reasoning="Error in analysis, defaulting to neutral"
- )
+ return BillAckmanSignal(signal="neutral", confidence=0.0, reasoning="Error in analysis, defaulting to neutral")
return call_llm(
- prompt=prompt,
- model_name=model_name,
- model_provider=model_provider,
- pydantic_model=BillAckmanSignal,
- agent_name="bill_ackman_agent",
+ prompt=prompt,
+ model_name=model_name,
+ model_provider=model_provider,
+ pydantic_model=BillAckmanSignal,
+ agent_name="bill_ackman_agent",
default_factory=create_default_bill_ackman_signal,
)
diff --git a/src/agents/cathie_wood.py b/src/agents/cathie_wood.py
index 72916b974..f2a6a3597 100644
--- a/src/agents/cathie_wood.py
+++ b/src/agents/cathie_wood.py
@@ -1,12 +1,15 @@
-from src.graph.state import AgentState, show_agent_reasoning
-from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
-from langchain_core.prompts import ChatPromptTemplate
+import json
+from typing import Any
+
from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel
-import json
from typing_extensions import Literal
-from src.utils.progress import progress
+
+from src.graph.state import AgentState, show_agent_reasoning
+from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
from src.utils.llm import call_llm
+from src.utils.progress import progress
class CathieWoodSignal(BaseModel):
@@ -360,7 +363,7 @@ def analyze_cathie_wood_valuation(financial_line_items: list, market_cap: float)
def generate_cathie_wood_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> CathieWoodSignal:
diff --git a/src/agents/charlie_munger.py b/src/agents/charlie_munger.py
index a9895302c..15e5fa167 100644
--- a/src/agents/charlie_munger.py
+++ b/src/agents/charlie_munger.py
@@ -1,12 +1,22 @@
-from src.graph.state import AgentState, show_agent_reasoning
-from src.tools.api import get_financial_metrics, get_market_cap, search_line_items, get_insider_trades, get_company_news
-from langchain_core.prompts import ChatPromptTemplate
+import json
+from typing import Any
+
from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel
-import json
from typing_extensions import Literal
-from src.utils.progress import progress
+
+from src.graph.state import AgentState, show_agent_reasoning
+from src.tools.api import (
+ get_company_news,
+ get_financial_metrics,
+ get_insider_trades,
+ get_market_cap,
+ search_line_items,
+)
from src.utils.llm import call_llm
+from src.utils.progress import progress
+
class CharlieMungerSignal(BaseModel):
signal: Literal["bullish", "bearish", "neutral"]
@@ -22,14 +32,14 @@ def charlie_munger_agent(state: AgentState):
data = state["data"]
end_date = data["end_date"]
tickers = data["tickers"]
-
+
analysis_data = {}
munger_analysis = {}
-
+
for ticker in tickers:
progress.update_status("charlie_munger_agent", ticker, "Fetching financial metrics")
metrics = get_financial_metrics(ticker, end_date, period="annual", limit=10) # Munger looks at longer periods
-
+
progress.update_status("charlie_munger_agent", ticker, "Gathering financial line items")
financial_line_items = search_line_items(
ticker,
@@ -51,12 +61,12 @@ def charlie_munger_agent(state: AgentState):
],
end_date,
period="annual",
- limit=10 # Munger examines long-term trends
+ limit=10, # Munger examines long-term trends
)
-
+
progress.update_status("charlie_munger_agent", ticker, "Getting market cap")
market_cap = get_market_cap(ticker, end_date)
-
+
progress.update_status("charlie_munger_agent", ticker, "Fetching insider trades")
# Munger values management with skin in the game
insider_trades = get_insider_trades(
@@ -64,9 +74,9 @@ def charlie_munger_agent(state: AgentState):
end_date,
# Look back 2 years for insider trading patterns
start_date=None,
- limit=100
+ limit=100,
)
-
+
progress.update_status("charlie_munger_agent", ticker, "Fetching company news")
# Munger avoids businesses with frequent negative press
company_news = get_company_news(
@@ -74,32 +84,27 @@ def charlie_munger_agent(state: AgentState):
end_date,
# Look back 1 year for news
start_date=None,
- limit=100
+ limit=100,
)
-
+
progress.update_status("charlie_munger_agent", ticker, "Analyzing moat strength")
moat_analysis = analyze_moat_strength(metrics, financial_line_items)
-
+
progress.update_status("charlie_munger_agent", ticker, "Analyzing management quality")
management_analysis = analyze_management_quality(financial_line_items, insider_trades)
-
+
progress.update_status("charlie_munger_agent", ticker, "Analyzing business predictability")
predictability_analysis = analyze_predictability(financial_line_items)
-
+
progress.update_status("charlie_munger_agent", ticker, "Calculating Munger-style valuation")
valuation_analysis = calculate_munger_valuation(financial_line_items, market_cap)
-
+
# Combine partial scores with Munger's weighting preferences
# Munger weights quality and predictability higher than current valuation
- total_score = (
- moat_analysis["score"] * 0.35 +
- management_analysis["score"] * 0.25 +
- predictability_analysis["score"] * 0.25 +
- valuation_analysis["score"] * 0.15
- )
-
+ total_score = moat_analysis["score"] * 0.35 + management_analysis["score"] * 0.25 + predictability_analysis["score"] * 0.25 + valuation_analysis["score"] * 0.15
+
max_possible_score = 10 # Scale to 0-10
-
+
# Generate a simple buy/hold/sell signal
if total_score >= 7.5: # Munger has very high standards
signal = "bullish"
@@ -107,7 +112,7 @@ def charlie_munger_agent(state: AgentState):
signal = "bearish"
else:
signal = "neutral"
-
+
analysis_data[ticker] = {
"signal": signal,
"score": total_score,
@@ -117,44 +122,34 @@ def charlie_munger_agent(state: AgentState):
"predictability_analysis": predictability_analysis,
"valuation_analysis": valuation_analysis,
# Include some qualitative assessment from news
- "news_sentiment": analyze_news_sentiment(company_news) if company_news else "No news data available"
+ "news_sentiment": analyze_news_sentiment(company_news) if company_news else "No news data available",
}
-
+
progress.update_status("charlie_munger_agent", ticker, "Generating Charlie Munger analysis")
munger_output = generate_munger_output(
- ticker=ticker,
+ ticker=ticker,
analysis_data=analysis_data,
model_name=state["metadata"]["model_name"],
model_provider=state["metadata"]["model_provider"],
)
-
- munger_analysis[ticker] = {
- "signal": munger_output.signal,
- "confidence": munger_output.confidence,
- "reasoning": munger_output.reasoning
- }
-
+
+ munger_analysis[ticker] = {"signal": munger_output.signal, "confidence": munger_output.confidence, "reasoning": munger_output.reasoning}
+
progress.update_status("charlie_munger_agent", ticker, "Done", analysis=munger_output.reasoning)
-
+
# Wrap results in a single message for the chain
- message = HumanMessage(
- content=json.dumps(munger_analysis),
- name="charlie_munger_agent"
- )
-
+ message = HumanMessage(content=json.dumps(munger_analysis), name="charlie_munger_agent")
+
# Show reasoning if requested
if state["metadata"]["show_reasoning"]:
show_agent_reasoning(munger_analysis, "Charlie Munger Agent")
progress.update_status("charlie_munger_agent", None, "Done")
-
+
# Add signals to the overall state
state["data"]["analyst_signals"]["charlie_munger_agent"] = munger_analysis
- return {
- "messages": [message],
- "data": state["data"]
- }
+ return {"messages": [message], "data": state["data"]}
def analyze_moat_strength(metrics: list, financial_line_items: list) -> dict:
@@ -167,17 +162,13 @@ def analyze_moat_strength(metrics: list, financial_line_items: list) -> dict:
"""
score = 0
details = []
-
+
if not metrics or not financial_line_items:
- return {
- "score": 0,
- "details": "Insufficient data to analyze moat strength"
- }
-
+ return {"score": 0, "details": "Insufficient data to analyze moat strength"}
+
# 1. Return on Invested Capital (ROIC) analysis - Munger's favorite metric
- roic_values = [item.return_on_invested_capital for item in financial_line_items
- if hasattr(item, 'return_on_invested_capital') and item.return_on_invested_capital is not None]
-
+ roic_values = [item.return_on_invested_capital for item in financial_line_items if hasattr(item, "return_on_invested_capital") and item.return_on_invested_capital is not None]
+
if roic_values:
# Check if ROIC consistently above 15% (Munger's threshold)
high_roic_count = sum(1 for r in roic_values if r > 0.15)
@@ -194,14 +185,13 @@ def analyze_moat_strength(metrics: list, financial_line_items: list) -> dict:
details.append("Poor ROIC: Never exceeds 15% threshold")
else:
details.append("No ROIC data available")
-
+
# 2. Pricing power - check gross margin stability and trends
- gross_margins = [item.gross_margin for item in financial_line_items
- if hasattr(item, 'gross_margin') and item.gross_margin is not None]
-
+ gross_margins = [item.gross_margin for item in financial_line_items if hasattr(item, "gross_margin") and item.gross_margin is not None]
+
if gross_margins and len(gross_margins) >= 3:
# Munger likes stable or improving gross margins
- margin_trend = sum(1 for i in range(1, len(gross_margins)) if gross_margins[i] >= gross_margins[i-1])
+ margin_trend = sum(1 for i in range(1, len(gross_margins)) if gross_margins[i] >= gross_margins[i - 1])
if margin_trend >= len(gross_margins) * 0.7: # Improving in 70% of periods
score += 2
details.append("Strong pricing power: Gross margins consistently improving")
@@ -212,17 +202,16 @@ def analyze_moat_strength(metrics: list, financial_line_items: list) -> dict:
details.append("Limited pricing power: Low or declining gross margins")
else:
details.append("Insufficient gross margin data")
-
+
# 3. Capital intensity - Munger prefers low capex businesses
if len(financial_line_items) >= 3:
capex_to_revenue = []
for item in financial_line_items:
- if (hasattr(item, 'capital_expenditure') and item.capital_expenditure is not None and
- hasattr(item, 'revenue') and item.revenue is not None and item.revenue > 0):
+ if hasattr(item, "capital_expenditure") and item.capital_expenditure is not None and hasattr(item, "revenue") and item.revenue is not None and item.revenue > 0:
# Note: capital_expenditure is typically negative in financial statements
capex_ratio = abs(item.capital_expenditure) / item.revenue
capex_to_revenue.append(capex_ratio)
-
+
if capex_to_revenue:
avg_capex_ratio = sum(capex_to_revenue) / len(capex_to_revenue)
if avg_capex_ratio < 0.05: # Less than 5% of revenue
@@ -237,30 +226,25 @@ def analyze_moat_strength(metrics: list, financial_line_items: list) -> dict:
details.append("No capital expenditure data available")
else:
details.append("Insufficient data for capital intensity analysis")
-
+
# 4. Intangible assets - Munger values R&D and intellectual property
- r_and_d = [item.research_and_development for item in financial_line_items
- if hasattr(item, 'research_and_development') and item.research_and_development is not None]
-
- goodwill_and_intangible_assets = [item.goodwill_and_intangible_assets for item in financial_line_items
- if hasattr(item, 'goodwill_and_intangible_assets') and item.goodwill_and_intangible_assets is not None]
+ r_and_d = [item.research_and_development for item in financial_line_items if hasattr(item, "research_and_development") and item.research_and_development is not None]
+
+ goodwill_and_intangible_assets = [item.goodwill_and_intangible_assets for item in financial_line_items if hasattr(item, "goodwill_and_intangible_assets") and item.goodwill_and_intangible_assets is not None]
if r_and_d and len(r_and_d) > 0:
if sum(r_and_d) > 0: # If company is investing in R&D
score += 1
details.append("Invests in R&D, building intellectual property")
-
- if (goodwill_and_intangible_assets and len(goodwill_and_intangible_assets) > 0):
+
+ if goodwill_and_intangible_assets and len(goodwill_and_intangible_assets) > 0:
score += 1
details.append("Significant goodwill/intangible assets, suggesting brand value or IP")
-
+
# Scale score to 0-10 range
final_score = min(10, score * 10 / 9) # Max possible raw score is 9
-
- return {
- "score": final_score,
- "details": "; ".join(details)
- }
+
+ return {"score": final_score, "details": "; ".join(details)}
def analyze_management_quality(financial_line_items: list, insider_trades: list) -> dict:
@@ -274,28 +258,23 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list)
"""
score = 0
details = []
-
+
if not financial_line_items:
- return {
- "score": 0,
- "details": "Insufficient data to analyze management quality"
- }
-
+ return {"score": 0, "details": "Insufficient data to analyze management quality"}
+
# 1. Capital allocation - Check FCF to net income ratio
# Munger values companies that convert earnings to cash
- fcf_values = [item.free_cash_flow for item in financial_line_items
- if hasattr(item, 'free_cash_flow') and item.free_cash_flow is not None]
-
- net_income_values = [item.net_income for item in financial_line_items
- if hasattr(item, 'net_income') and item.net_income is not None]
-
+ fcf_values = [item.free_cash_flow for item in financial_line_items if hasattr(item, "free_cash_flow") and item.free_cash_flow is not None]
+
+ net_income_values = [item.net_income for item in financial_line_items if hasattr(item, "net_income") and item.net_income is not None]
+
if fcf_values and net_income_values and len(fcf_values) == len(net_income_values):
# Calculate FCF to Net Income ratio for each period
fcf_to_ni_ratios = []
for i in range(len(fcf_values)):
if net_income_values[i] and net_income_values[i] > 0:
fcf_to_ni_ratios.append(fcf_values[i] / net_income_values[i])
-
+
if fcf_to_ni_ratios:
avg_ratio = sum(fcf_to_ni_ratios) / len(fcf_to_ni_ratios)
if avg_ratio > 1.1: # FCF > net income suggests good accounting
@@ -313,18 +292,16 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list)
details.append("Could not calculate FCF to Net Income ratios")
else:
details.append("Missing FCF or Net Income data")
-
+
# 2. Debt management - Munger is cautious about debt
- debt_values = [item.total_debt for item in financial_line_items
- if hasattr(item, 'total_debt') and item.total_debt is not None]
-
- equity_values = [item.shareholders_equity for item in financial_line_items
- if hasattr(item, 'shareholders_equity') and item.shareholders_equity is not None]
-
+ debt_values = [item.total_debt for item in financial_line_items if hasattr(item, "total_debt") and item.total_debt is not None]
+
+ equity_values = [item.shareholders_equity for item in financial_line_items if hasattr(item, "shareholders_equity") and item.shareholders_equity is not None]
+
if debt_values and equity_values and len(debt_values) == len(equity_values):
# Calculate D/E ratio for most recent period
- recent_de_ratio = debt_values[0] / equity_values[0] if equity_values[0] > 0 else float('inf')
-
+ recent_de_ratio = debt_values[0] / equity_values[0] if equity_values[0] > 0 else float("inf")
+
if recent_de_ratio < 0.3: # Very low debt
score += 3
details.append(f"Conservative debt management: D/E ratio of {recent_de_ratio:.2f}")
@@ -338,17 +315,15 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list)
details.append(f"High debt level: D/E ratio of {recent_de_ratio:.2f}")
else:
details.append("Missing debt or equity data")
-
+
# 3. Cash management efficiency - Munger values appropriate cash levels
- cash_values = [item.cash_and_equivalents for item in financial_line_items
- if hasattr(item, 'cash_and_equivalents') and item.cash_and_equivalents is not None]
- revenue_values = [item.revenue for item in financial_line_items
- if hasattr(item, 'revenue') and item.revenue is not None]
-
+ cash_values = [item.cash_and_equivalents for item in financial_line_items if hasattr(item, "cash_and_equivalents") and item.cash_and_equivalents is not None]
+ revenue_values = [item.revenue for item in financial_line_items if hasattr(item, "revenue") and item.revenue is not None]
+
if cash_values and revenue_values and len(cash_values) > 0 and len(revenue_values) > 0:
# Calculate cash to revenue ratio (Munger likes 10-20% for most businesses)
cash_to_revenue = cash_values[0] / revenue_values[0] if revenue_values[0] > 0 else 0
-
+
if 0.1 <= cash_to_revenue <= 0.25:
# Goldilocks zone - not too much, not too little
score += 2
@@ -365,15 +340,13 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list)
details.append(f"Low cash reserves: Cash/Revenue ratio of {cash_to_revenue:.2f}")
else:
details.append("Insufficient cash or revenue data")
-
+
# 4. Insider activity - Munger values skin in the game
if insider_trades and len(insider_trades) > 0:
# Count buys vs. sells
- buys = sum(1 for trade in insider_trades if hasattr(trade, 'transaction_type') and
- trade.transaction_type and trade.transaction_type.lower() in ['buy', 'purchase'])
- sells = sum(1 for trade in insider_trades if hasattr(trade, 'transaction_type') and
- trade.transaction_type and trade.transaction_type.lower() in ['sell', 'sale'])
-
+ buys = sum(1 for trade in insider_trades if hasattr(trade, "transaction_type") and trade.transaction_type and trade.transaction_type.lower() in ["buy", "purchase"])
+ sells = sum(1 for trade in insider_trades if hasattr(trade, "transaction_type") and trade.transaction_type and trade.transaction_type.lower() in ["sell", "sale"])
+
# Calculate the buy ratio
total_trades = buys + sells
if total_trades > 0:
@@ -393,11 +366,10 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list)
details.append("No recorded insider transactions")
else:
details.append("No insider trading data available")
-
+
# 5. Consistency in share count - Munger prefers stable/decreasing shares
- share_counts = [item.outstanding_shares for item in financial_line_items
- if hasattr(item, 'outstanding_shares') and item.outstanding_shares is not None]
-
+ share_counts = [item.outstanding_shares for item in financial_line_items if hasattr(item, "outstanding_shares") and item.outstanding_shares is not None]
+
if share_counts and len(share_counts) >= 3:
if share_counts[0] < share_counts[-1] * 0.95: # 5%+ reduction in shares
score += 2
@@ -412,15 +384,12 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list)
details.append("Moderate share count increase over time")
else:
details.append("Insufficient share count data")
-
+
# Scale score to 0-10 range
# Maximum possible raw score would be 12 (3+3+2+2+2)
final_score = max(0, min(10, score * 10 / 12))
-
- return {
- "score": final_score,
- "details": "; ".join(details)
- }
+
+ return {"score": final_score, "details": "; ".join(details)}
def analyze_predictability(financial_line_items: list) -> dict:
@@ -430,24 +399,20 @@ def analyze_predictability(financial_line_items: list) -> dict:
"""
score = 0
details = []
-
+
if not financial_line_items or len(financial_line_items) < 5:
- return {
- "score": 0,
- "details": "Insufficient data to analyze business predictability (need 5+ years)"
- }
-
+ return {"score": 0, "details": "Insufficient data to analyze business predictability (need 5+ years)"}
+
# 1. Revenue stability and growth
- revenues = [item.revenue for item in financial_line_items
- if hasattr(item, 'revenue') and item.revenue is not None]
-
+ revenues = [item.revenue for item in financial_line_items if hasattr(item, "revenue") and item.revenue is not None]
+
if revenues and len(revenues) >= 5:
# Calculate year-over-year growth rates
- growth_rates = [(revenues[i] / revenues[i+1] - 1) for i in range(len(revenues)-1)]
-
+ growth_rates = [(revenues[i] / revenues[i + 1] - 1) for i in range(len(revenues) - 1)]
+
avg_growth = sum(growth_rates) / len(growth_rates)
growth_volatility = sum(abs(r - avg_growth) for r in growth_rates) / len(growth_rates)
-
+
if avg_growth > 0.05 and growth_volatility < 0.1:
# Steady, consistent growth (Munger loves this)
score += 3
@@ -464,15 +429,14 @@ def analyze_predictability(financial_line_items: list) -> dict:
details.append(f"Declining or highly unpredictable revenue: {avg_growth:.1%} avg growth")
else:
details.append("Insufficient revenue history for predictability analysis")
-
+
# 2. Operating income stability
- op_income = [item.operating_income for item in financial_line_items
- if hasattr(item, 'operating_income') and item.operating_income is not None]
-
+ op_income = [item.operating_income for item in financial_line_items if hasattr(item, "operating_income") and item.operating_income is not None]
+
if op_income and len(op_income) >= 5:
# Count positive operating income periods
positive_periods = sum(1 for income in op_income if income > 0)
-
+
if positive_periods == len(op_income):
# Consistently profitable operations
score += 3
@@ -489,16 +453,15 @@ def analyze_predictability(financial_line_items: list) -> dict:
details.append(f"Unpredictable operations: Operating income positive in only {positive_periods}/{len(op_income)} periods")
else:
details.append("Insufficient operating income history")
-
+
# 3. Margin consistency - Munger values stable margins
- op_margins = [item.operating_margin for item in financial_line_items
- if hasattr(item, 'operating_margin') and item.operating_margin is not None]
-
+ op_margins = [item.operating_margin for item in financial_line_items if hasattr(item, "operating_margin") and item.operating_margin is not None]
+
if op_margins and len(op_margins) >= 5:
# Calculate margin volatility
avg_margin = sum(op_margins) / len(op_margins)
margin_volatility = sum(abs(m - avg_margin) for m in op_margins) / len(op_margins)
-
+
if margin_volatility < 0.03: # Very stable margins
score += 2
details.append(f"Highly predictable margins: {avg_margin:.1%} avg with minimal volatility")
@@ -509,15 +472,14 @@ def analyze_predictability(financial_line_items: list) -> dict:
details.append(f"Unpredictable margins: {avg_margin:.1%} avg with high volatility ({margin_volatility:.1%})")
else:
details.append("Insufficient margin history")
-
+
# 4. Cash generation reliability
- fcf_values = [item.free_cash_flow for item in financial_line_items
- if hasattr(item, 'free_cash_flow') and item.free_cash_flow is not None]
-
+ fcf_values = [item.free_cash_flow for item in financial_line_items if hasattr(item, "free_cash_flow") and item.free_cash_flow is not None]
+
if fcf_values and len(fcf_values) >= 5:
# Count positive FCF periods
positive_fcf_periods = sum(1 for fcf in fcf_values if fcf > 0)
-
+
if positive_fcf_periods == len(fcf_values):
# Consistently positive FCF
score += 2
@@ -530,15 +492,12 @@ def analyze_predictability(financial_line_items: list) -> dict:
details.append(f"Unpredictable cash generation: Positive FCF in only {positive_fcf_periods}/{len(fcf_values)} periods")
else:
details.append("Insufficient free cash flow history")
-
+
# Scale score to 0-10 range
# Maximum possible raw score would be 10 (3+3+2+2)
final_score = min(10, score * 10 / 10)
-
- return {
- "score": final_score,
- "details": "; ".join(details)
- }
+
+ return {"score": final_score, "details": "; ".join(details)}
def calculate_munger_valuation(financial_line_items: list, market_cap: float) -> dict:
@@ -550,37 +509,26 @@ def calculate_munger_valuation(financial_line_items: list, market_cap: float) ->
"""
score = 0
details = []
-
+
if not financial_line_items or market_cap is None:
- return {
- "score": 0,
- "details": "Insufficient data to perform valuation"
- }
-
+ return {"score": 0, "details": "Insufficient data to perform valuation"}
+
# Get FCF values (Munger's preferred "owner earnings" metric)
- fcf_values = [item.free_cash_flow for item in financial_line_items
- if hasattr(item, 'free_cash_flow') and item.free_cash_flow is not None]
-
+ fcf_values = [item.free_cash_flow for item in financial_line_items if hasattr(item, "free_cash_flow") and item.free_cash_flow is not None]
+
if not fcf_values or len(fcf_values) < 3:
- return {
- "score": 0,
- "details": "Insufficient free cash flow data for valuation"
- }
-
+ return {"score": 0, "details": "Insufficient free cash flow data for valuation"}
+
# 1. Normalize earnings by taking average of last 3-5 years
# (Munger prefers to normalize earnings to avoid over/under-valuation based on cyclical factors)
- normalized_fcf = sum(fcf_values[:min(5, len(fcf_values))]) / min(5, len(fcf_values))
-
+ normalized_fcf = sum(fcf_values[: min(5, len(fcf_values))]) / min(5, len(fcf_values))
+
if normalized_fcf <= 0:
- return {
- "score": 0,
- "details": f"Negative or zero normalized FCF ({normalized_fcf}), cannot value",
- "intrinsic_value": None
- }
-
+ return {"score": 0, "details": f"Negative or zero normalized FCF ({normalized_fcf}), cannot value", "intrinsic_value": None}
+
# 2. Calculate FCF yield (inverse of P/FCF multiple)
fcf_yield = normalized_fcf / market_cap
-
+
# 3. Apply Munger's FCF multiple based on business quality
# Munger would pay higher multiples for wonderful businesses
# Let's use a sliding scale where higher FCF yields are more attractive
@@ -595,16 +543,16 @@ def calculate_munger_valuation(financial_line_items: list, market_cap: float) ->
details.append(f"Fair value: {fcf_yield:.1%} FCF yield")
else:
details.append(f"Expensive: Only {fcf_yield:.1%} FCF yield")
-
+
# 4. Calculate simple intrinsic value range
# Munger tends to use straightforward valuations, avoiding complex DCF models
conservative_value = normalized_fcf * 10 # 10x FCF = 10% yield
- reasonable_value = normalized_fcf * 15 # 15x FCF ≈ 6.7% yield
- optimistic_value = normalized_fcf * 20 # 20x FCF = 5% yield
-
+ reasonable_value = normalized_fcf * 15 # 15x FCF ≈ 6.7% yield
+ optimistic_value = normalized_fcf * 20 # 20x FCF = 5% yield
+
# 5. Calculate margins of safety
current_to_reasonable = (reasonable_value - market_cap) / market_cap
-
+
if current_to_reasonable > 0.3: # >30% upside
score += 3
details.append(f"Large margin of safety: {current_to_reasonable:.1%} upside to reasonable value")
@@ -616,13 +564,13 @@ def calculate_munger_valuation(financial_line_items: list, market_cap: float) ->
details.append(f"Fair price: Within 10% of reasonable value ({current_to_reasonable:.1%})")
else:
details.append(f"Expensive: {-current_to_reasonable:.1%} premium to reasonable value")
-
+
# 6. Check earnings trajectory for additional context
# Munger likes growing owner earnings
if len(fcf_values) >= 3:
recent_avg = sum(fcf_values[:3]) / 3
older_avg = sum(fcf_values[-3:]) / 3 if len(fcf_values) >= 6 else fcf_values[-1]
-
+
if recent_avg > older_avg * 1.2: # >20% growth in FCF
score += 3
details.append("Growing FCF trend adds to intrinsic value")
@@ -631,22 +579,12 @@ def calculate_munger_valuation(financial_line_items: list, market_cap: float) ->
details.append("Stable to growing FCF supports valuation")
else:
details.append("Declining FCF trend is concerning")
-
+
# Scale score to 0-10 range
# Maximum possible raw score would be 10 (4+3+3)
- final_score = min(10, score * 10 / 10)
-
- return {
- "score": final_score,
- "details": "; ".join(details),
- "intrinsic_value_range": {
- "conservative": conservative_value,
- "reasonable": reasonable_value,
- "optimistic": optimistic_value
- },
- "fcf_yield": fcf_yield,
- "normalized_fcf": normalized_fcf
- }
+ final_score = min(10, score * 10 / 10)
+
+ return {"score": final_score, "details": "; ".join(details), "intrinsic_value_range": {"conservative": conservative_value, "reasonable": reasonable_value, "optimistic": optimistic_value}, "fcf_yield": fcf_yield, "normalized_fcf": normalized_fcf}
def analyze_news_sentiment(news_items: list) -> str:
@@ -656,24 +594,25 @@ def analyze_news_sentiment(news_items: list) -> str:
"""
if not news_items or len(news_items) == 0:
return "No news data available"
-
+
# Just return a simple count for now - in a real implementation, this would use NLP
return f"Qualitative review of {len(news_items)} recent news items would be needed"
def generate_munger_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> CharlieMungerSignal:
"""
Generates investment decisions in the style of Charlie Munger.
"""
- template = ChatPromptTemplate.from_messages([
- (
- "system",
- """You are a Charlie Munger AI agent, making investment decisions using his principles:
+ template = ChatPromptTemplate.from_messages(
+ [
+ (
+ "system",
+ """You are a Charlie Munger AI agent, making investment decisions using his principles:
1. Focus on the quality and predictability of the business.
2. Rely on mental models from multiple disciplines to analyze investments.
@@ -705,11 +644,11 @@ def generate_munger_output(
For example, if bullish: "The high ROIC of 22% demonstrates the company's moat. When applying basic microeconomics, we can see that competitors would struggle to..."
For example, if bearish: "I see this business making a classic mistake in capital allocation. As I've often said about [relevant Mungerism], this company appears to be..."
- """
- ),
- (
- "human",
- """Based on the following analysis, create a Munger-style investment signal.
+ """,
+ ),
+ (
+ "human",
+ """Based on the following analysis, create a Munger-style investment signal.
Analysis Data for {ticker}:
{analysis_data}
@@ -720,27 +659,21 @@ def generate_munger_output(
"confidence": float (0-100),
"reasoning": "string"
}}
- """
- )
- ])
+ """,
+ ),
+ ]
+ )
- prompt = template.invoke({
- "analysis_data": json.dumps(analysis_data, indent=2),
- "ticker": ticker
- })
+ prompt = template.invoke({"analysis_data": json.dumps(analysis_data, indent=2), "ticker": ticker})
def create_default_charlie_munger_signal():
- return CharlieMungerSignal(
- signal="neutral",
- confidence=0.0,
- reasoning="Error in analysis, defaulting to neutral"
- )
+ return CharlieMungerSignal(signal="neutral", confidence=0.0, reasoning="Error in analysis, defaulting to neutral")
return call_llm(
- prompt=prompt,
- model_name=model_name,
- model_provider=model_provider,
- pydantic_model=CharlieMungerSignal,
- agent_name="charlie_munger_agent",
+ prompt=prompt,
+ model_name=model_name,
+ model_provider=model_provider,
+ pydantic_model=CharlieMungerSignal,
+ agent_name="charlie_munger_agent",
default_factory=create_default_charlie_munger_signal,
- )
\ No newline at end of file
+ )
diff --git a/src/agents/crypto_risk_manager.py b/src/agents/crypto_risk_manager.py
new file mode 100644
index 000000000..350b1e7bc
--- /dev/null
+++ b/src/agents/crypto_risk_manager.py
@@ -0,0 +1,134 @@
+import json
+
+import ccxt  # type: ignore
+from langchain_core.messages import HumanMessage
+
+from src.data.api import get_price_ohlcv
+from src.data.models import Price
+from src.graph.state import AgentState, show_agent_reasoning
+from src.tools.api import prices_to_df
+from src.utils.progress import progress
+
+
+##### Risk Management Agent #####
+def _max_position_size(portfolio: dict, remaining: float, ex: ccxt.Exchange, pair: str) -> float:
+    """Cap ``remaining`` by the cash actually spendable on ``pair`` after taker fees.
+
+    Falls back to a zero fee when the exchange cannot report trading fees
+    (e.g. missing API credentials) instead of aborting the risk analysis.
+    """
+    try:
+        # .get("taker") may return an explicit null; coerce it to 0 as well.
+        fee = ex.fetch_trading_fee(pair).get("taker") or 0
+    except Exception:
+        fee = 0  # best effort: assume no fee rather than fail the whole agent
+    available = portfolio.get("cash", 0)
+    cost_with_fee = available / (1 + fee)
+    # Never size below zero: the position may already exceed its limit.
+    return max(0.0, min(remaining, cost_with_fee))
+
+
+def risk_management_agent(state: AgentState):
+    """Controls position sizing based on real-world risk factors for multiple tickers."""
+    portfolio = state["data"]["portfolio"]
+    data = state["data"]
+    tickers = data["tickers"]
+
+    # Initialize risk analysis for each ticker
+    risk_analysis = {}
+    current_prices = {}  # Store prices here to avoid redundant API calls
+
+    # First, fetch prices for all relevant tickers
+    all_tickers = set(tickers) | set(portfolio.get("positions", {}).keys())
+
+    ex = getattr(ccxt, data.get("exchange", "binance"))()
+
+    for ticker in all_tickers:
+        progress.update_status("risk_management_agent", ticker, "Fetching price data")
+
+        prices = get_price_ohlcv(
+            symbol_or_pair=ticker,
+            start=data["start_date"],
+            end=data["end_date"],
+            exchange=data.get("exchange", "binance"),
+        )
+
+        if not prices:
+            progress.update_status("risk_management_agent", ticker, "Warning: No price data found")
+            continue
+
+        prices_df = prices_to_df([Price(**p) for p in prices])
+
+        if not prices_df.empty:
+            current_price = prices_df["close"].iloc[-1]
+            current_prices[ticker] = current_price
+            progress.update_status("risk_management_agent", ticker, f"Current price: {current_price}")
+        else:
+            progress.update_status("risk_management_agent", ticker, "Warning: Empty price data")
+
+    # Calculate total portfolio value based on current market prices (Net Liquidation Value)
+    total_portfolio_value = portfolio.get("cash", 0.0)
+
+    for ticker, position in portfolio.get("positions", {}).items():
+        if ticker in current_prices:
+            # Add market value of long positions
+            total_portfolio_value += position.get("long", 0) * current_prices[ticker]
+            # Subtract market value of short positions
+            total_portfolio_value -= position.get("short", 0) * current_prices[ticker]
+
+    progress.update_status("risk_management_agent", None, f"Total portfolio value: {total_portfolio_value}")
+
+    # Calculate risk limits for each ticker in the universe
+    for ticker in tickers:
+        progress.update_status("risk_management_agent", ticker, "Calculating position limits")
+
+        if ticker not in current_prices:
+            progress.update_status("risk_management_agent", ticker, "Failed: No price data available")
+            risk_analysis[ticker] = {"remaining_position_limit": 0.0, "current_price": 0.0, "reasoning": {"error": "Missing price data for risk calculation"}}
+            continue
+
+        current_price = current_prices[ticker]
+
+        # Calculate current market value of this position
+        position = portfolio.get("positions", {}).get(ticker, {})
+        long_value = position.get("long", 0) * current_price
+        short_value = position.get("short", 0) * current_price
+        current_position_value = abs(long_value - short_value)  # Use absolute exposure
+
+        # Calculate position limit (20% of total portfolio)
+        position_limit = total_portfolio_value * 0.20
+
+        # Calculate remaining limit for this position
+        remaining_position_limit = position_limit - current_position_value
+
+        # Ensure we don't exceed available cash
+        max_position_size = _max_position_size(portfolio, remaining_position_limit, ex, ticker)
+
+        risk_analysis[ticker] = {
+            "remaining_position_limit": float(max_position_size),
+            "current_price": float(current_price),
+            "reasoning": {
+                "portfolio_value": float(total_portfolio_value),
+                "current_position_value": float(current_position_value),
+                "position_limit": float(position_limit),
+                "remaining_limit": float(remaining_position_limit),
+                "available_cash": float(portfolio.get("cash", 0)),
+            },
+        }
+
+        progress.update_status("risk_management_agent", ticker, "Done")
+
+    message = HumanMessage(
+        content=json.dumps(risk_analysis),
+        name="risk_management_agent",
+    )
+
+    if state["metadata"]["show_reasoning"]:
+        show_agent_reasoning(risk_analysis, "Risk Management Agent")
+
+    # Add the signal to the analyst_signals list
+    state["data"]["analyst_signals"]["risk_management_agent"] = risk_analysis
+
+    return {
+        "messages": state["messages"] + [message],
+        "data": data,
+    }
diff --git a/src/agents/crypto_sentiment_analyst.py b/src/agents/crypto_sentiment_analyst.py
new file mode 100644
index 000000000..b37fa1ed3
--- /dev/null
+++ b/src/agents/crypto_sentiment_analyst.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import json
+
+from langchain_core.messages import HumanMessage
+
+from src.data.api import get_token_metrics
+from src.graph.state import AgentState, show_agent_reasoning
+from src.utils.progress import progress
+
+##### Crypto Sentiment Analyst #####
+
+
+def crypto_sentiment_analyst_agent(state: AgentState):
+    """Assess crypto sentiment from short-term (7-day) price momentum.
+
+    Emits a bullish/bearish/neutral signal per pair, with confidence capped
+    at 100 to match the 0-100 scale used by the other analyst agents.
+    """
+    data = state.get("data", {})
+    pairs = data.get("pairs") or data.get("tickers") or []
+
+    analysis = {}
+    for pair in pairs:
+        token = pair.split("/")[0]  # base asset, e.g. "BTC" from "BTC/USDT"
+        progress.update_status("crypto_sentiment_analyst_agent", pair, "Fetching metrics")
+        metrics = get_token_metrics(token)
+        market = metrics.get("market_data", {}) if metrics else {}
+        # The API may return an explicit null for this field; treat it as flat.
+        momentum = market.get("price_change_percentage_7d") or 0
+        signal = "bullish" if momentum > 0 else "bearish" if momentum < 0 else "neutral"
+        analysis[pair] = {
+            "signal": signal,
+            "confidence": min(abs(momentum), 100.0),
+            "reasoning": f"7d price change {momentum}%",
+        }
+        progress.update_status("crypto_sentiment_analyst_agent", pair, "Done")
+
+    message = HumanMessage(content=json.dumps(analysis), name="crypto_sentiment_analyst_agent")
+    if state["metadata"]["show_reasoning"]:
+        show_agent_reasoning(analysis, "Crypto Sentiment Analyst")
+    state["data"]["analyst_signals"]["crypto_sentiment_analyst"] = analysis
+    return {"messages": [message], "data": data}
diff --git a/src/agents/on_chain_analyst.py b/src/agents/on_chain_analyst.py
new file mode 100644
index 000000000..6ce44b9b0
--- /dev/null
+++ b/src/agents/on_chain_analyst.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import json
+
+from langchain_core.messages import HumanMessage
+
+from src.data.api import get_token_metrics
+from src.graph.state import AgentState, show_agent_reasoning
+from src.utils.progress import progress
+
+##### On-Chain Analyst #####
+
+
+def on_chain_analyst_agent(state: AgentState):
+    """Generate signals for crypto pairs from 24h price momentum.
+
+    NOTE(review): despite the agent name, no on-chain data (flows, active
+    addresses, ...) is consumed yet - only the 24h price change is used.
+    """
+    data = state.get("data", {})
+    pairs = data.get("pairs") or data.get("tickers") or []
+
+    analysis = {}
+    for pair in pairs:
+        token = pair.split("/")[0]  # base asset, e.g. "BTC" from "BTC/USDT"
+        progress.update_status("on_chain_analyst_agent", pair, "Fetching metrics")
+        metrics = get_token_metrics(token)
+        market = metrics.get("market_data", {}) if metrics else {}
+        # The API may return an explicit null for this field; treat it as flat.
+        price_change = market.get("price_change_percentage_24h") or 0
+        signal = "bullish" if price_change > 0 else "bearish" if price_change < 0 else "neutral"
+        analysis[pair] = {
+            "signal": signal,
+            "confidence": min(abs(price_change), 100.0),
+            "reasoning": f"24h price change {price_change}%",
+        }
+        progress.update_status("on_chain_analyst_agent", pair, "Done")
+
+    message = HumanMessage(content=json.dumps(analysis), name="on_chain_analyst_agent")
+    if state["metadata"]["show_reasoning"]:
+        show_agent_reasoning(analysis, "On-Chain Analyst")
+    state["data"]["analyst_signals"]["on_chain_analyst"] = analysis
+    return {"messages": [message], "data": data}
diff --git a/src/agents/peter_lynch.py b/src/agents/peter_lynch.py
index d56bd3535..2fa521069 100644
--- a/src/agents/peter_lynch.py
+++ b/src/agents/peter_lynch.py
@@ -1,25 +1,29 @@
+import json
+from typing import Any
+
+from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
+from pydantic import BaseModel
+from typing_extensions import Literal
+
from src.graph.state import AgentState, show_agent_reasoning
from src.tools.api import (
+ get_company_news,
get_financial_metrics,
- get_market_cap,
- search_line_items,
get_insider_trades,
- get_company_news,
+ get_market_cap,
get_prices,
+ search_line_items,
)
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.messages import HumanMessage
-from pydantic import BaseModel
-import json
-from typing_extensions import Literal
-from src.utils.progress import progress
from src.utils.llm import call_llm
+from src.utils.progress import progress
class PeterLynchSignal(BaseModel):
"""
Container for the Peter Lynch-style output signal.
"""
+
signal: Literal["bullish", "bearish", "neutral"]
confidence: float
reasoning: str
@@ -106,13 +110,7 @@ def peter_lynch_agent(state: AgentState):
# Combine partial scores with weights typical for Peter Lynch:
# 30% Growth, 25% Valuation, 20% Fundamentals,
# 15% Sentiment, 10% Insider Activity = 100%
- total_score = (
- growth_analysis["score"] * 0.30
- + valuation_analysis["score"] * 0.25
- + fundamentals_analysis["score"] * 0.20
- + sentiment_analysis["score"] * 0.15
- + insider_activity["score"] * 0.10
- )
+ total_score = growth_analysis["score"] * 0.30 + valuation_analysis["score"] * 0.25 + fundamentals_analysis["score"] * 0.20 + sentiment_analysis["score"] * 0.15 + insider_activity["score"] * 0.10
max_possible_score = 10.0
@@ -440,7 +438,7 @@ def analyze_insider_activity(insider_trades: list) -> dict:
def generate_lynch_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> PeterLynchSignal:
@@ -492,11 +490,7 @@ def generate_lynch_output(
prompt = template.invoke({"analysis_data": json.dumps(analysis_data, indent=2), "ticker": ticker})
def create_default_signal():
- return PeterLynchSignal(
- signal="neutral",
- confidence=0.0,
- reasoning="Error in analysis; defaulting to neutral"
- )
+ return PeterLynchSignal(signal="neutral", confidence=0.0, reasoning="Error in analysis; defaulting to neutral")
return call_llm(
prompt=prompt,
diff --git a/src/agents/phil_fisher.py b/src/agents/phil_fisher.py
index f2417f0e5..3c846df2a 100644
--- a/src/agents/phil_fisher.py
+++ b/src/agents/phil_fisher.py
@@ -1,19 +1,22 @@
+import json
+import statistics
+from typing import Any
+
+from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
+from pydantic import BaseModel
+from typing_extensions import Literal
+
from src.graph.state import AgentState, show_agent_reasoning
from src.tools.api import (
+ get_company_news,
get_financial_metrics,
+ get_insider_trades,
get_market_cap,
search_line_items,
- get_insider_trades,
- get_company_news,
)
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.messages import HumanMessage
-from pydantic import BaseModel
-import json
-from typing_extensions import Literal
-from src.utils.progress import progress
from src.utils.llm import call_llm
-import statistics
+from src.utils.progress import progress
class PhilFisherSignal(BaseModel):
@@ -107,14 +110,7 @@ def phil_fisher_agent(state: AgentState):
# 15% Valuation
# 5% Insider Activity
# 5% Sentiment
- total_score = (
- growth_quality["score"] * 0.30
- + margins_stability["score"] * 0.25
- + mgmt_efficiency["score"] * 0.20
- + fisher_valuation["score"] * 0.15
- + insider_activity["score"] * 0.05
- + sentiment_analysis["score"] * 0.05
- )
+ total_score = growth_quality["score"] * 0.30 + margins_stability["score"] * 0.25 + mgmt_efficiency["score"] * 0.20 + fisher_valuation["score"] * 0.15 + insider_activity["score"] * 0.05 + sentiment_analysis["score"] * 0.05
max_possible_score = 10
@@ -163,7 +159,7 @@ def phil_fisher_agent(state: AgentState):
state["data"]["analyst_signals"]["phil_fisher_agent"] = fisher_analysis
progress.update_status("phil_fisher_agent", None, "Done")
-
+
return {"messages": [message], "data": state["data"]}
@@ -529,7 +525,7 @@ def analyze_sentiment(news_items: list) -> dict:
def generate_fisher_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> PhilFisherSignal:
@@ -539,8 +535,8 @@ def generate_fisher_output(
template = ChatPromptTemplate.from_messages(
[
(
- "system",
- """You are a Phil Fisher AI agent, making investment decisions using his principles:
+ "system",
+ """You are a Phil Fisher AI agent, making investment decisions using his principles:
1. Emphasize long-term growth potential and quality of management.
2. Focus on companies investing in R&D for future products/services.
@@ -567,8 +563,8 @@ def generate_fisher_output(
""",
),
(
- "human",
- """Based on the following analysis, create a Phil Fisher-style investment signal.
+ "human",
+ """Based on the following analysis, create a Phil Fisher-style investment signal.
Analysis Data for {ticker}:
{analysis_data}
@@ -587,11 +583,7 @@ def generate_fisher_output(
prompt = template.invoke({"analysis_data": json.dumps(analysis_data, indent=2), "ticker": ticker})
def create_default_signal():
- return PhilFisherSignal(
- signal="neutral",
- confidence=0.0,
- reasoning="Error in analysis, defaulting to neutral"
- )
+ return PhilFisherSignal(signal="neutral", confidence=0.0, reasoning="Error in analysis, defaulting to neutral")
return call_llm(
prompt=prompt,
diff --git a/src/agents/rakesh_jhunjhunwala.py b/src/agents/rakesh_jhunjhunwala.py
index 9e40f8f61..c72c88768 100644
--- a/src/agents/rakesh_jhunjhunwala.py
+++ b/src/agents/rakesh_jhunjhunwala.py
@@ -1,18 +1,23 @@
-from src.graph.state import AgentState, show_agent_reasoning
-from langchain_core.prompts import ChatPromptTemplate
+import json
+from typing import Any
+
from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel
-import json
from typing_extensions import Literal
+
+from src.graph.state import AgentState, show_agent_reasoning
from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
from src.utils.llm import call_llm
from src.utils.progress import progress
+
class RakeshJhunjhunwalaSignal(BaseModel):
signal: Literal["bullish", "bearish", "neutral"]
confidence: float
reasoning: str
+
def rakesh_jhunjhunwala_agent(state: AgentState):
"""Analyzes stocks using Rakesh Jhunjhunwala's principles and LLM reasoning."""
data = state["data"]
@@ -32,21 +37,7 @@ def rakesh_jhunjhunwala_agent(state: AgentState):
progress.update_status("rakesh_jhunjhunwala_agent", ticker, "Fetching financial line items")
financial_line_items = search_line_items(
ticker,
- [
- "net_income",
- "earnings_per_share",
- "ebit",
- "operating_income",
- "revenue",
- "operating_margin",
- "total_assets",
- "total_liabilities",
- "current_assets",
- "current_liabilities",
- "free_cash_flow",
- "dividends_and_other_cash_distributions",
- "issuance_or_purchase_of_equity_shares"
- ],
+ ["net_income", "earnings_per_share", "ebit", "operating_income", "revenue", "operating_margin", "total_assets", "total_liabilities", "current_assets", "current_liabilities", "free_cash_flow", "dividends_and_other_cash_distributions", "issuance_or_purchase_of_equity_shares"],
end_date,
)
@@ -59,35 +50,27 @@ def rakesh_jhunjhunwala_agent(state: AgentState):
progress.update_status("rakesh_jhunjhunwala_agent", ticker, "Analyzing profitability")
profitability_analysis = analyze_profitability(financial_line_items)
-
+
progress.update_status("rakesh_jhunjhunwala_agent", ticker, "Analyzing balance sheet")
balancesheet_analysis = analyze_balance_sheet(financial_line_items)
-
+
progress.update_status("rakesh_jhunjhunwala_agent", ticker, "Analyzing cash flow")
cashflow_analysis = analyze_cash_flow(financial_line_items)
-
+
progress.update_status("rakesh_jhunjhunwala_agent", ticker, "Analyzing management actions")
management_analysis = analyze_management_actions(financial_line_items)
-
+
progress.update_status("rakesh_jhunjhunwala_agent", ticker, "Calculating intrinsic value")
# Calculate intrinsic value once
intrinsic_value = calculate_intrinsic_value(financial_line_items, market_cap)
# ─── Score & margin of safety ──────────────────────────────────────────
- total_score = (
- growth_analysis["score"]
- + profitability_analysis["score"]
- + balancesheet_analysis["score"]
- + cashflow_analysis["score"]
- + management_analysis["score"]
- )
+ total_score = growth_analysis["score"] + profitability_analysis["score"] + balancesheet_analysis["score"] + cashflow_analysis["score"] + management_analysis["score"]
# Fixed: Correct max_score calculation based on actual scoring breakdown
max_score = 24 # 8(prof) + 7(growth) + 4(bs) + 3(cf) + 2(mgmt) = 24
# Calculate margin of safety
- margin_of_safety = (
- (intrinsic_value - market_cap) / market_cap if intrinsic_value and market_cap else None
- )
+ margin_of_safety = (intrinsic_value - market_cap) / market_cap if intrinsic_value and market_cap else None
# Jhunjhunwala's decision rules (30% minimum margin of safety for conviction)
if margin_of_safety is not None and margin_of_safety >= 0.30:
@@ -111,11 +94,7 @@ def rakesh_jhunjhunwala_agent(state: AgentState):
confidence = min(max((total_score / max_score) * 100, 10), 80) # Based on score
# Create comprehensive analysis summary
- intrinsic_value_analysis = analyze_rakesh_jhunjhunwala_style(
- financial_line_items,
- intrinsic_value=intrinsic_value,
- current_price=market_cap
- )
+ intrinsic_value_analysis = analyze_rakesh_jhunjhunwala_style(financial_line_items, intrinsic_value=intrinsic_value, current_price=market_cap)
analysis_data[ticker] = {
"signal": signal,
@@ -157,7 +136,7 @@ def rakesh_jhunjhunwala_agent(state: AgentState):
return {"messages": [message], "data": state["data"]}
-def analyze_profitability(financial_line_items: list) -> dict[str, any]:
+def analyze_profitability(financial_line_items: list) -> dict[str, Any]:
"""
Analyze profitability metrics like net income, EBIT, EPS, operating income.
Focus on strong, consistent earnings growth and operating efficiency.
@@ -170,10 +149,8 @@ def analyze_profitability(financial_line_items: list) -> dict[str, any]:
reasoning = []
# Calculate ROE (Return on Equity) - Jhunjhunwala's key metric
- if (getattr(latest, 'net_income', None) and latest.net_income > 0 and
- getattr(latest, 'total_assets', None) and getattr(latest, 'total_liabilities', None) and
- latest.total_assets and latest.total_liabilities):
-
+ if getattr(latest, "net_income", None) and latest.net_income > 0 and getattr(latest, "total_assets", None) and getattr(latest, "total_liabilities", None) and latest.total_assets and latest.total_liabilities:
+
shareholders_equity = latest.total_assets - latest.total_liabilities
if shareholders_equity > 0:
roe = (latest.net_income / shareholders_equity) * 100
@@ -194,8 +171,7 @@ def analyze_profitability(financial_line_items: list) -> dict[str, any]:
reasoning.append("Unable to calculate ROE - missing data")
# Operating Margin Analysis
- if (getattr(latest, "operating_income", None) and latest.operating_income and
- getattr(latest, "revenue", None) and latest.revenue and latest.revenue > 0):
+ if getattr(latest, "operating_income", None) and latest.operating_income and getattr(latest, "revenue", None) and latest.revenue and latest.revenue > 0:
operating_margin = (latest.operating_income / latest.revenue) * 100
if operating_margin > 20: # Excellent margin
score += 2
@@ -211,17 +187,16 @@ def analyze_profitability(financial_line_items: list) -> dict[str, any]:
reasoning.append("Unable to calculate operating margin")
# EPS Growth Consistency (3-year trend)
- eps_values = [getattr(item, "earnings_per_share", None) for item in financial_line_items
- if getattr(item, "earnings_per_share", None) is not None and getattr(item, "earnings_per_share", None) > 0]
-
+ eps_values = [getattr(item, "earnings_per_share", None) for item in financial_line_items if getattr(item, "earnings_per_share", None) is not None and getattr(item, "earnings_per_share", None) > 0]
+
if len(eps_values) >= 3:
# Calculate CAGR for EPS
initial_eps = eps_values[-1] # Oldest value
- final_eps = eps_values[0] # Latest value
+ final_eps = eps_values[0] # Latest value
years = len(eps_values) - 1
-
+
if initial_eps > 0:
- eps_cagr = ((final_eps / initial_eps) ** (1/years) - 1) * 100
+ eps_cagr = ((final_eps / initial_eps) ** (1 / years) - 1) * 100
if eps_cagr > 20: # High growth
score += 3
reasoning.append(f"High EPS CAGR: {eps_cagr:.1f}%")
@@ -241,7 +216,7 @@ def analyze_profitability(financial_line_items: list) -> dict[str, any]:
return {"score": score, "details": "; ".join(reasoning)}
-def analyze_growth(financial_line_items: list) -> dict[str, any]:
+def analyze_growth(financial_line_items: list) -> dict[str, Any]:
"""
Analyze revenue and net income growth trends using CAGR.
Jhunjhunwala favored companies with strong, consistent compound growth.
@@ -253,17 +228,16 @@ def analyze_growth(financial_line_items: list) -> dict[str, any]:
reasoning = []
# Revenue CAGR Analysis
- revenues = [getattr(item, "revenue", None) for item in financial_line_items
- if getattr(item, "revenue", None) is not None and getattr(item, "revenue", None) > 0]
-
+ revenues = [getattr(item, "revenue", None) for item in financial_line_items if getattr(item, "revenue", None) is not None and getattr(item, "revenue", None) > 0]
+
if len(revenues) >= 3:
initial_revenue = revenues[-1] # Oldest
- final_revenue = revenues[0] # Latest
+ final_revenue = revenues[0] # Latest
years = len(revenues) - 1
-
+
if initial_revenue > 0: # Fixed: Add zero check
- revenue_cagr = ((final_revenue / initial_revenue) ** (1/years) - 1) * 100
-
+ revenue_cagr = ((final_revenue / initial_revenue) ** (1 / years) - 1) * 100
+
if revenue_cagr > 20: # High growth
score += 3
reasoning.append(f"Excellent revenue CAGR: {revenue_cagr:.1f}%")
@@ -281,17 +255,16 @@ def analyze_growth(financial_line_items: list) -> dict[str, any]:
reasoning.append("Insufficient revenue data for CAGR calculation")
# Net Income CAGR Analysis
- net_incomes = [getattr(item, "net_income", None) for item in financial_line_items
- if getattr(item, "net_income", None) is not None and getattr(item, "net_income", None) > 0]
-
+ net_incomes = [getattr(item, "net_income", None) for item in financial_line_items if getattr(item, "net_income", None) is not None and getattr(item, "net_income", None) > 0]
+
if len(net_incomes) >= 3:
initial_income = net_incomes[-1] # Oldest
- final_income = net_incomes[0] # Latest
+ final_income = net_incomes[0] # Latest
years = len(net_incomes) - 1
-
+
if initial_income > 0: # Fixed: Add zero check
- income_cagr = ((final_income / initial_income) ** (1/years) - 1) * 100
-
+ income_cagr = ((final_income / initial_income) ** (1 / years) - 1) * 100
+
if income_cagr > 25: # Very high growth
score += 3
reasoning.append(f"Excellent income CAGR: {income_cagr:.1f}%")
@@ -310,9 +283,9 @@ def analyze_growth(financial_line_items: list) -> dict[str, any]:
# Revenue Consistency Check (year-over-year)
if len(revenues) >= 3:
- declining_years = sum(1 for i in range(1, len(revenues)) if revenues[i-1] > revenues[i])
+ declining_years = sum(1 for i in range(1, len(revenues)) if revenues[i - 1] > revenues[i])
consistency_ratio = 1 - (declining_years / (len(revenues) - 1))
-
+
if consistency_ratio >= 0.8: # 80% or more years with growth
score += 1
reasoning.append(f"Consistent growth pattern ({consistency_ratio*100:.0f}% of years)")
@@ -322,7 +295,7 @@ def analyze_growth(financial_line_items: list) -> dict[str, any]:
return {"score": score, "details": "; ".join(reasoning)}
-def analyze_balance_sheet(financial_line_items: list) -> dict[str, any]:
+def analyze_balance_sheet(financial_line_items: list) -> dict[str, Any]:
"""
Check financial strength - healthy asset/liability structure, liquidity.
Jhunjhunwala favored companies with clean balance sheets and manageable debt.
@@ -335,9 +308,7 @@ def analyze_balance_sheet(financial_line_items: list) -> dict[str, any]:
reasoning = []
# Debt to asset ratio
- if (getattr(latest, "total_assets", None) and getattr(latest, "total_liabilities", None)
- and latest.total_assets and latest.total_liabilities
- and latest.total_assets > 0):
+ if getattr(latest, "total_assets", None) and getattr(latest, "total_liabilities", None) and latest.total_assets and latest.total_liabilities and latest.total_assets > 0:
debt_ratio = latest.total_liabilities / latest.total_assets
if debt_ratio < 0.5:
score += 2
@@ -351,9 +322,7 @@ def analyze_balance_sheet(financial_line_items: list) -> dict[str, any]:
reasoning.append("Insufficient data to calculate debt ratio")
# Current ratio (liquidity)
- if (getattr(latest, "current_assets", None) and getattr(latest, "current_liabilities", None)
- and latest.current_assets and latest.current_liabilities
- and latest.current_liabilities > 0):
+ if getattr(latest, "current_assets", None) and getattr(latest, "current_liabilities", None) and latest.current_assets and latest.current_liabilities and latest.current_liabilities > 0:
current_ratio = latest.current_assets / latest.current_liabilities
if current_ratio > 2.0:
score += 2
@@ -369,7 +338,7 @@ def analyze_balance_sheet(financial_line_items: list) -> dict[str, any]:
return {"score": score, "details": "; ".join(reasoning)}
-def analyze_cash_flow(financial_line_items: list) -> dict[str, any]:
+def analyze_cash_flow(financial_line_items: list) -> dict[str, Any]:
"""
Evaluate free cash flow and dividend behavior.
Jhunjhunwala appreciated companies generating strong free cash flow and rewarding shareholders.
@@ -404,7 +373,7 @@ def analyze_cash_flow(financial_line_items: list) -> dict[str, any]:
return {"score": score, "details": "; ".join(reasoning)}
-def analyze_management_actions(financial_line_items: list) -> dict[str, any]:
+def analyze_management_actions(financial_line_items: list) -> dict[str, Any]:
"""
Look at share issuance or buybacks to assess shareholder friendliness.
Jhunjhunwala liked managements who buy back shares or avoid dilution.
@@ -439,14 +408,13 @@ def assess_quality_metrics(financial_line_items: list) -> float:
"""
if not financial_line_items:
return 0.5 # Neutral score
-
+
latest = financial_line_items[0]
quality_factors = []
-
+
# ROE consistency and level
- if (getattr(latest, 'net_income', None) and getattr(latest, 'total_assets', None) and
- getattr(latest, 'total_liabilities', None) and latest.total_assets and latest.total_liabilities):
-
+ if getattr(latest, "net_income", None) and getattr(latest, "total_assets", None) and getattr(latest, "total_liabilities", None) and latest.total_assets and latest.total_liabilities:
+
shareholders_equity = latest.total_assets - latest.total_liabilities
if shareholders_equity > 0 and latest.net_income:
roe = latest.net_income / shareholders_equity
@@ -462,10 +430,9 @@ def assess_quality_metrics(financial_line_items: list) -> float:
quality_factors.append(0.0)
else:
quality_factors.append(0.5)
-
+
# Debt levels (lower is better)
- if (getattr(latest, 'total_assets', None) and getattr(latest, 'total_liabilities', None) and
- latest.total_assets and latest.total_liabilities):
+ if getattr(latest, "total_assets", None) and getattr(latest, "total_liabilities", None) and latest.total_assets and latest.total_liabilities:
debt_ratio = latest.total_liabilities / latest.total_assets
if debt_ratio < 0.3: # Low debt
quality_factors.append(1.0)
@@ -477,18 +444,17 @@ def assess_quality_metrics(financial_line_items: list) -> float:
quality_factors.append(0.1)
else:
quality_factors.append(0.5)
-
+
# Growth consistency
- net_incomes = [getattr(item, "net_income", None) for item in financial_line_items[:4]
- if getattr(item, "net_income", None) is not None and getattr(item, "net_income", None) > 0]
-
+ net_incomes = [getattr(item, "net_income", None) for item in financial_line_items[:4] if getattr(item, "net_income", None) is not None and getattr(item, "net_income", None) > 0]
+
if len(net_incomes) >= 3:
- declining_years = sum(1 for i in range(1, len(net_incomes)) if net_incomes[i-1] > net_incomes[i])
+ declining_years = sum(1 for i in range(1, len(net_incomes)) if net_incomes[i - 1] > net_incomes[i])
consistency = 1 - (declining_years / (len(net_incomes) - 1))
quality_factors.append(consistency)
else:
quality_factors.append(0.5)
-
+
# Return average quality score
return sum(quality_factors) / len(quality_factors) if quality_factors else 0.5
@@ -502,33 +468,32 @@ def calculate_intrinsic_value(financial_line_items: list, market_cap: float) ->
"""
if not financial_line_items or not market_cap:
return None
-
+
try:
latest = financial_line_items[0]
-
+
# Need positive earnings as base
- if not getattr(latest, 'net_income', None) or latest.net_income <= 0:
+ if not getattr(latest, "net_income", None) or latest.net_income <= 0:
return None
-
+
# Get historical earnings for growth calculation
- net_incomes = [getattr(item, "net_income", None) for item in financial_line_items[:5]
- if getattr(item, "net_income", None) is not None and getattr(item, "net_income", None) > 0]
-
+ net_incomes = [getattr(item, "net_income", None) for item in financial_line_items[:5] if getattr(item, "net_income", None) is not None and getattr(item, "net_income", None) > 0]
+
if len(net_incomes) < 2:
# Use current earnings with conservative multiple for stable companies
return latest.net_income * 12 # Conservative P/E of 12
-
+
# Calculate sustainable growth rate using historical data
initial_income = net_incomes[-1] # Oldest
- final_income = net_incomes[0] # Latest
+ final_income = net_incomes[0] # Latest
years = len(net_incomes) - 1
-
+
# Calculate historical CAGR
if initial_income > 0: # Fixed: Add zero check
- historical_growth = ((final_income / initial_income) ** (1/years) - 1)
+ historical_growth = (final_income / initial_income) ** (1 / years) - 1
else:
historical_growth = 0.05 # Default to 5%
-
+
# Conservative growth assumptions (Jhunjhunwala style)
if historical_growth > 0.25: # Cap at 25% for sustainability
sustainable_growth = 0.20 # Conservative 20%
@@ -538,10 +503,10 @@ def calculate_intrinsic_value(financial_line_items: list, market_cap: float) ->
sustainable_growth = historical_growth * 0.9 # 90% of historical
else:
sustainable_growth = 0.05 # Minimum 5% for inflation
-
+
# Quality assessment affects discount rate
quality_score = assess_quality_metrics(financial_line_items)
-
+
# Discount rate based on quality (Jhunjhunwala preferred quality)
if quality_score >= 0.8: # High quality
discount_rate = 0.12 # 12% for high quality companies
@@ -552,29 +517,29 @@ def calculate_intrinsic_value(financial_line_items: list, market_cap: float) ->
else: # Lower quality
discount_rate = 0.18 # 18% for riskier companies
terminal_multiple = 12
-
+
# Simple DCF with terminal value
current_earnings = latest.net_income
terminal_value = 0
dcf_value = 0
-
+
# Project 5 years of earnings
for year in range(1, 6):
projected_earnings = current_earnings * ((1 + sustainable_growth) ** year)
present_value = projected_earnings / ((1 + discount_rate) ** year)
dcf_value += present_value
-
+
# Terminal value (year 5 earnings * terminal multiple)
year_5_earnings = current_earnings * ((1 + sustainable_growth) ** 5)
terminal_value = (year_5_earnings * terminal_multiple) / ((1 + discount_rate) ** 5)
-
+
total_intrinsic_value = dcf_value + terminal_value
-
+
return total_intrinsic_value
-
+
except Exception:
# Fallback to simple earnings multiple
- if getattr(latest, 'net_income', None) and latest.net_income > 0:
+ if getattr(latest, "net_income", None) and latest.net_income > 0:
return latest.net_income * 15
return None
@@ -584,7 +549,7 @@ def analyze_rakesh_jhunjhunwala_style(
owner_earnings: float = None,
intrinsic_value: float = None,
current_price: float = None,
-) -> dict[str, any]:
+) -> dict[str, Any]:
"""
Comprehensive analysis in Rakesh Jhunjhunwala's investment style.
"""
@@ -595,21 +560,9 @@ def analyze_rakesh_jhunjhunwala_style(
cash_flow = analyze_cash_flow(financial_line_items)
management = analyze_management_actions(financial_line_items)
- total_score = (
- profitability["score"]
- + growth["score"]
- + balance_sheet["score"]
- + cash_flow["score"]
- + management["score"]
- )
+ total_score = profitability["score"] + growth["score"] + balance_sheet["score"] + cash_flow["score"] + management["score"]
- details = (
- f"Profitability: {profitability['details']}\n"
- f"Growth: {growth['details']}\n"
- f"Balance Sheet: {balance_sheet['details']}\n"
- f"Cash Flow: {cash_flow['details']}\n"
- f"Management Actions: {management['details']}"
- )
+ details = f"Profitability: {profitability['details']}\n" f"Growth: {growth['details']}\n" f"Balance Sheet: {balance_sheet['details']}\n" f"Cash Flow: {cash_flow['details']}\n" f"Management Actions: {management['details']}"
# Use provided intrinsic value or calculate if not provided
if not intrinsic_value:
@@ -641,7 +594,7 @@ def analyze_rakesh_jhunjhunwala_style(
# ────────────────────────────────────────────────────────────────────────────────
def generate_jhunjhunwala_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> RakeshJhunjhunwalaSignal:
@@ -704,4 +657,4 @@ def create_default_rakesh_jhunjhunwala_signal():
pydantic_model=RakeshJhunjhunwalaSignal,
agent_name="rakesh_jhunjhunwala_agent",
default_factory=create_default_rakesh_jhunjhunwala_signal,
- )
\ No newline at end of file
+ )
diff --git a/src/agents/risk_manager.py b/src/agents/risk_manager.py
index e8f1ccf12..e1baeb1d8 100644
--- a/src/agents/risk_manager.py
+++ b/src/agents/risk_manager.py
@@ -1,8 +1,15 @@
+import json
+
from langchain_core.messages import HumanMessage
+
from src.graph.state import AgentState, show_agent_reasoning
-from src.utils.progress import progress
from src.tools.api import get_prices, prices_to_df
-import json
+from src.utils.progress import progress
+
+
+def _max_position_size(portfolio: dict, remaining: float) -> float:
+ """Calculate max position size for equity trades."""
+ return min(remaining, portfolio.get("cash", 0))
##### Risk Management Agent #####
@@ -18,10 +25,10 @@ def risk_management_agent(state: AgentState):
# First, fetch prices for all relevant tickers
all_tickers = set(tickers) | set(portfolio.get("positions", {}).keys())
-
+
for ticker in all_tickers:
progress.update_status("risk_management_agent", ticker, "Fetching price data")
-
+
prices = get_prices(
ticker=ticker,
start_date=data["start_date"],
@@ -33,7 +40,7 @@ def risk_management_agent(state: AgentState):
continue
prices_df = prices_to_df(prices)
-
+
if not prices_df.empty:
current_price = prices_df["close"].iloc[-1]
current_prices[ticker] = current_price
@@ -43,48 +50,42 @@ def risk_management_agent(state: AgentState):
# Calculate total portfolio value based on current market prices (Net Liquidation Value)
total_portfolio_value = portfolio.get("cash", 0.0)
-
+
for ticker, position in portfolio.get("positions", {}).items():
if ticker in current_prices:
# Add market value of long positions
total_portfolio_value += position.get("long", 0) * current_prices[ticker]
# Subtract market value of short positions
total_portfolio_value -= position.get("short", 0) * current_prices[ticker]
-
+
progress.update_status("risk_management_agent", None, f"Total portfolio value: {total_portfolio_value}")
# Calculate risk limits for each ticker in the universe
for ticker in tickers:
progress.update_status("risk_management_agent", ticker, "Calculating position limits")
-
+
if ticker not in current_prices:
progress.update_status("risk_management_agent", ticker, "Failed: No price data available")
- risk_analysis[ticker] = {
- "remaining_position_limit": 0.0,
- "current_price": 0.0,
- "reasoning": {
- "error": "Missing price data for risk calculation"
- }
- }
+ risk_analysis[ticker] = {"remaining_position_limit": 0.0, "current_price": 0.0, "reasoning": {"error": "Missing price data for risk calculation"}}
continue
-
+
current_price = current_prices[ticker]
-
+
# Calculate current market value of this position
position = portfolio.get("positions", {}).get(ticker, {})
long_value = position.get("long", 0) * current_price
short_value = position.get("short", 0) * current_price
current_position_value = abs(long_value - short_value) # Use absolute exposure
-
+
# Calculate position limit (20% of total portfolio)
position_limit = total_portfolio_value * 0.20
-
+
# Calculate remaining limit for this position
remaining_position_limit = position_limit - current_position_value
-
+
# Ensure we don't exceed available cash
- max_position_size = min(remaining_position_limit, portfolio.get("cash", 0))
-
+ max_position_size = _max_position_size(portfolio, remaining_position_limit)
+
risk_analysis[ticker] = {
"remaining_position_limit": float(max_position_size),
"current_price": float(current_price),
@@ -96,7 +97,7 @@ def risk_management_agent(state: AgentState):
"available_cash": float(portfolio.get("cash", 0)),
},
}
-
+
progress.update_status("risk_management_agent", ticker, "Done")
message = HumanMessage(
diff --git a/src/agents/stanley_druckenmiller.py b/src/agents/stanley_druckenmiller.py
index 868c7a5ed..95b62664e 100644
--- a/src/agents/stanley_druckenmiller.py
+++ b/src/agents/stanley_druckenmiller.py
@@ -1,20 +1,23 @@
+import json
+import statistics
+from typing import Any
+
+from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
+from pydantic import BaseModel
+from typing_extensions import Literal
+
from src.graph.state import AgentState, show_agent_reasoning
from src.tools.api import (
+ get_company_news,
get_financial_metrics,
- get_market_cap,
- search_line_items,
get_insider_trades,
- get_company_news,
+ get_market_cap,
get_prices,
+ search_line_items,
)
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.messages import HumanMessage
-from pydantic import BaseModel
-import json
-from typing_extensions import Literal
-from src.utils.progress import progress
from src.utils.llm import call_llm
-import statistics
+from src.utils.progress import progress
class StanleyDruckenmillerSignal(BaseModel):
@@ -104,13 +107,7 @@ def stanley_druckenmiller_agent(state: AgentState):
# Combine partial scores with weights typical for Druckenmiller:
# 35% Growth/Momentum, 20% Risk/Reward, 20% Valuation,
# 15% Sentiment, 10% Insider Activity = 100%
- total_score = (
- growth_momentum_analysis["score"] * 0.35
- + risk_reward_analysis["score"] * 0.20
- + valuation_analysis["score"] * 0.20
- + sentiment_analysis["score"] * 0.15
- + insider_activity["score"] * 0.10
- )
+ total_score = growth_momentum_analysis["score"] * 0.35 + risk_reward_analysis["score"] * 0.20 + valuation_analysis["score"] * 0.20 + sentiment_analysis["score"] * 0.15 + insider_activity["score"] * 0.10
max_possible_score = 10
@@ -158,7 +155,7 @@ def stanley_druckenmiller_agent(state: AgentState):
state["data"]["analyst_signals"]["stanley_druckenmiller_agent"] = druck_analysis
progress.update_status("stanley_druckenmiller_agent", None, "Done")
-
+
return {"messages": [message], "data": state["data"]}
@@ -523,7 +520,7 @@ def analyze_druckenmiller_valuation(financial_line_items: list, market_cap: floa
def generate_druckenmiller_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> StanleyDruckenmillerSignal:
@@ -533,8 +530,8 @@ def generate_druckenmiller_output(
template = ChatPromptTemplate.from_messages(
[
(
- "system",
- """You are a Stanley Druckenmiller AI agent, making investment decisions using his principles:
+ "system",
+ """You are a Stanley Druckenmiller AI agent, making investment decisions using his principles:
1. Seek asymmetric risk-reward opportunities (large upside, limited downside).
2. Emphasize growth, momentum, and market sentiment.
@@ -562,8 +559,8 @@ def generate_druckenmiller_output(
""",
),
(
- "human",
- """Based on the following analysis, create a Druckenmiller-style investment signal.
+ "human",
+ """Based on the following analysis, create a Druckenmiller-style investment signal.
Analysis Data for {ticker}:
{analysis_data}
@@ -582,11 +579,7 @@ def generate_druckenmiller_output(
prompt = template.invoke({"analysis_data": json.dumps(analysis_data, indent=2), "ticker": ticker})
def create_default_signal():
- return StanleyDruckenmillerSignal(
- signal="neutral",
- confidence=0.0,
- reasoning="Error in analysis, defaulting to neutral"
- )
+ return StanleyDruckenmillerSignal(signal="neutral", confidence=0.0, reasoning="Error in analysis, defaulting to neutral")
return call_llm(
prompt=prompt,
diff --git a/src/agents/tokenomics_analyst.py b/src/agents/tokenomics_analyst.py
new file mode 100644
index 000000000..21befb8e6
--- /dev/null
+++ b/src/agents/tokenomics_analyst.py
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+import json
+
+from langchain_core.messages import HumanMessage
+
+from src.data.api import get_token_metrics
+from src.graph.state import AgentState, show_agent_reasoning
+from src.utils.progress import progress
+
+##### Tokenomics Analyst #####
+
+
+def tokenomics_analyst_agent(state: AgentState):
+ """Analyze tokenomics data for crypto pairs."""
+ data = state.get("data", {})
+ pairs = data.get("pairs") or data.get("tickers") or []
+
+ analysis = {}
+ for pair in pairs:
+ token = pair.split("/")[0]
+ progress.update_status("tokenomics_analyst_agent", pair, "Fetching metrics")
+ metrics = get_token_metrics(token)
+ supply = metrics.get("market_data", {}).get("circulating_supply", 0) if metrics else 0
+ max_supply = metrics.get("market_data", {}).get("max_supply", 0) if metrics else 0
+        ratio = (supply or 0) / max_supply if max_supply else 0
+        signal = "bullish" if 0 < ratio < 0.5 else "bearish" if ratio > 0.9 else "neutral"
+ analysis[pair] = {
+ "signal": signal,
+ "confidence": ratio,
+ "reasoning": f"Circulating/max supply ratio {ratio:.2f}",
+ }
+ progress.update_status("tokenomics_analyst_agent", pair, "Done")
+
+ message = HumanMessage(content=json.dumps(analysis), name="tokenomics_analyst_agent")
+ if state["metadata"]["show_reasoning"]:
+ show_agent_reasoning(analysis, "Tokenomics Analyst")
+    data.setdefault("analyst_signals", {})["tokenomics_analyst"] = analysis
+ return {"messages": [message], "data": data}
diff --git a/src/agents/warren_buffett.py b/src/agents/warren_buffett.py
index a530491b3..a4fbe65df 100644
--- a/src/agents/warren_buffett.py
+++ b/src/agents/warren_buffett.py
@@ -1,9 +1,12 @@
-from src.graph.state import AgentState, show_agent_reasoning
-from langchain_core.prompts import ChatPromptTemplate
+import json
+from typing import Any
+
from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel
-import json
from typing_extensions import Literal
+
+from src.graph.state import AgentState, show_agent_reasoning
from src.tools.api import get_financial_metrics, get_market_cap, search_line_items
from src.utils.llm import call_llm
from src.utils.progress import progress
@@ -79,23 +82,10 @@ def warren_buffett_agent(state: AgentState):
intrinsic_value_analysis = calculate_intrinsic_value(financial_line_items)
# Calculate total score without circle of competence (LLM will handle that)
- total_score = (
- fundamental_analysis["score"] +
- consistency_analysis["score"] +
- moat_analysis["score"] +
- mgmt_analysis["score"] +
- pricing_power_analysis["score"] +
- book_value_analysis["score"]
- )
-
+ total_score = fundamental_analysis["score"] + consistency_analysis["score"] + moat_analysis["score"] + mgmt_analysis["score"] + pricing_power_analysis["score"] + book_value_analysis["score"]
+
# Update max possible score calculation
- max_possible_score = (
- 10 + # fundamental_analysis (ROE, debt, margins, current ratio)
- moat_analysis["max_score"] +
- mgmt_analysis["max_score"] +
- 5 + # pricing_power (0-5)
- 5 # book_value_growth (0-5)
- )
+    max_possible_score = 10 + moat_analysis["max_score"] + mgmt_analysis["max_score"] + 5 + 5  # 10 fundamentals (ROE, debt, margins, current ratio) + moat + mgmt + 5 pricing power + 5 book value growth
# Add margin of safety analysis if we have both intrinsic value and current price
margin_of_safety = None
@@ -151,7 +141,7 @@ def warren_buffett_agent(state: AgentState):
return {"messages": [message], "data": state["data"]}
-def analyze_fundamentals(metrics: list) -> dict[str, any]:
+def analyze_fundamentals(metrics: list) -> dict[str, Any]:
"""Analyze company fundamentals based on Buffett's criteria."""
if not metrics:
return {"score": 0, "details": "Insufficient fundamental data"}
@@ -200,7 +190,7 @@ def analyze_fundamentals(metrics: list) -> dict[str, any]:
return {"score": score, "details": "; ".join(reasoning), "metrics": latest_metrics.model_dump()}
-def analyze_consistency(financial_line_items: list) -> dict[str, any]:
+def analyze_consistency(financial_line_items: list) -> dict[str, Any]:
"""Analyze earnings consistency and growth."""
if len(financial_line_items) < 4: # Need at least 4 periods for trend analysis
return {"score": 0, "details": "Insufficient historical data"}
@@ -233,7 +223,7 @@ def analyze_consistency(financial_line_items: list) -> dict[str, any]:
}
-def analyze_moat(metrics: list) -> dict[str, any]:
+def analyze_moat(metrics: list) -> dict[str, Any]:
"""
Evaluate whether the company likely has a durable competitive advantage (moat).
Enhanced to include multiple moat indicators that Buffett actually looks for:
@@ -252,13 +242,13 @@ def analyze_moat(metrics: list) -> dict[str, any]:
# 1. Return on Capital Consistency (Buffett's favorite moat indicator)
historical_roes = [m.return_on_equity for m in metrics if m.return_on_equity is not None]
- historical_roics = [m.return_on_invested_capital for m in metrics if hasattr(m, 'return_on_invested_capital') and m.return_on_invested_capital is not None]
-
+ historical_roics = [m.return_on_invested_capital for m in metrics if hasattr(m, "return_on_invested_capital") and m.return_on_invested_capital is not None]
+
if len(historical_roes) >= 5:
# Check for consistently high ROE (>15% for most periods)
high_roe_periods = sum(1 for roe in historical_roes if roe > 0.15)
roe_consistency = high_roe_periods / len(historical_roes)
-
+
if roe_consistency >= 0.8: # 80%+ of periods with ROE > 15%
moat_score += 2
avg_roe = sum(historical_roes) / len(historical_roes)
@@ -278,10 +268,10 @@ def analyze_moat(metrics: list) -> dict[str, any]:
avg_margin = sum(historical_margins) / len(historical_margins)
recent_margins = historical_margins[:3] # Last 3 periods
older_margins = historical_margins[-3:] # First 3 periods
-
+
recent_avg = sum(recent_margins) / len(recent_margins)
older_avg = sum(older_margins) / len(older_margins)
-
+
if avg_margin > 0.2 and recent_avg >= older_avg: # 20%+ margins and stable/improving
moat_score += 1
reasoning.append(f"Strong and stable operating margins (avg: {avg_margin:.1%}) indicate pricing power moat")
@@ -289,37 +279,37 @@ def analyze_moat(metrics: list) -> dict[str, any]:
reasoning.append(f"Decent operating margins (avg: {avg_margin:.1%}) suggest some competitive advantage")
else:
reasoning.append(f"Low operating margins (avg: {avg_margin:.1%}) suggest limited pricing power")
-
+
# 3. Asset Efficiency and Scale Advantages
if len(metrics) >= 5:
# Check asset turnover trends (revenue efficiency)
asset_turnovers = []
for m in metrics:
- if hasattr(m, 'asset_turnover') and m.asset_turnover is not None:
+ if hasattr(m, "asset_turnover") and m.asset_turnover is not None:
asset_turnovers.append(m.asset_turnover)
-
+
if len(asset_turnovers) >= 3:
if any(turnover > 1.0 for turnover in asset_turnovers): # Efficient asset use
moat_score += 1
reasoning.append("Efficient asset utilization suggests operational moat")
-
+
# 4. Competitive Position Strength (inferred from trend stability)
if len(historical_roes) >= 5 and len(historical_margins) >= 5:
# Calculate coefficient of variation (stability measure)
roe_avg = sum(historical_roes) / len(historical_roes)
roe_variance = sum((roe - roe_avg) ** 2 for roe in historical_roes) / len(historical_roes)
- roe_stability = 1 - (roe_variance ** 0.5) / roe_avg if roe_avg > 0 else 0
-
+ roe_stability = 1 - (roe_variance**0.5) / roe_avg if roe_avg > 0 else 0
+
margin_avg = sum(historical_margins) / len(historical_margins)
margin_variance = sum((margin - margin_avg) ** 2 for margin in historical_margins) / len(historical_margins)
- margin_stability = 1 - (margin_variance ** 0.5) / margin_avg if margin_avg > 0 else 0
-
+ margin_stability = 1 - (margin_variance**0.5) / margin_avg if margin_avg > 0 else 0
+
overall_stability = (roe_stability + margin_stability) / 2
-
+
if overall_stability > 0.7: # High stability indicates strong competitive position
moat_score += 1
reasoning.append(f"High performance stability ({overall_stability:.1%}) suggests strong competitive moat")
-
+
# Cap the score at max_score
moat_score = min(moat_score, max_score)
@@ -330,7 +320,7 @@ def analyze_moat(metrics: list) -> dict[str, any]:
}
-def analyze_management_quality(financial_line_items: list) -> dict[str, any]:
+def analyze_management_quality(financial_line_items: list) -> dict[str, Any]:
"""
Checks for share dilution or consistent buybacks, and some dividend track record.
A simplified approach:
@@ -370,7 +360,7 @@ def analyze_management_quality(financial_line_items: list) -> dict[str, any]:
}
-def calculate_owner_earnings(financial_line_items: list) -> dict[str, any]:
+def calculate_owner_earnings(financial_line_items: list) -> dict[str, Any]:
"""
Calculate owner earnings (Buffett's preferred measure of true earnings power).
Enhanced methodology: Net Income + Depreciation/Amortization - Maintenance CapEx - Working Capital Changes
@@ -389,25 +379,28 @@ def calculate_owner_earnings(financial_line_items: list) -> dict[str, any]:
if not all([net_income is not None, depreciation is not None, capex is not None]):
missing = []
- if net_income is None: missing.append("net income")
- if depreciation is None: missing.append("depreciation")
- if capex is None: missing.append("capital expenditure")
+ if net_income is None:
+ missing.append("net income")
+ if depreciation is None:
+ missing.append("depreciation")
+ if capex is None:
+ missing.append("capital expenditure")
return {"owner_earnings": None, "details": [f"Missing components: {', '.join(missing)}"]}
# Enhanced maintenance capex estimation using historical analysis
maintenance_capex = estimate_maintenance_capex(financial_line_items)
-
+
# Working capital change analysis (if data available)
working_capital_change = 0
if len(financial_line_items) >= 2:
try:
- current_assets_current = getattr(latest, 'current_assets', None)
- current_liab_current = getattr(latest, 'current_liabilities', None)
-
+ current_assets_current = getattr(latest, "current_assets", None)
+ current_liab_current = getattr(latest, "current_liabilities", None)
+
previous = financial_line_items[1]
- current_assets_previous = getattr(previous, 'current_assets', None)
- current_liab_previous = getattr(previous, 'current_liabilities', None)
-
+ current_assets_previous = getattr(previous, "current_assets", None)
+ current_liab_previous = getattr(previous, "current_liabilities", None)
+
if all([current_assets_current, current_liab_current, current_assets_previous, current_liab_previous]):
wc_current = current_assets_current - current_liab_current
wc_previous = current_assets_previous - current_liab_previous
@@ -422,26 +415,15 @@ def calculate_owner_earnings(financial_line_items: list) -> dict[str, any]:
# Sanity checks
if owner_earnings < net_income * 0.3: # Owner earnings shouldn't be less than 30% of net income typically
details.append("Warning: Owner earnings significantly below net income - high capex intensity")
-
+
if maintenance_capex > depreciation * 2: # Maintenance capex shouldn't typically exceed 2x depreciation
details.append("Warning: Estimated maintenance capex seems high relative to depreciation")
- details.extend([
- f"Net income: ${net_income:,.0f}",
- f"Depreciation: ${depreciation:,.0f}",
- f"Estimated maintenance capex: ${maintenance_capex:,.0f}",
- f"Owner earnings: ${owner_earnings:,.0f}"
- ])
+ details.extend([f"Net income: ${net_income:,.0f}", f"Depreciation: ${depreciation:,.0f}", f"Estimated maintenance capex: ${maintenance_capex:,.0f}", f"Owner earnings: ${owner_earnings:,.0f}"])
return {
"owner_earnings": owner_earnings,
- "components": {
- "net_income": net_income,
- "depreciation": depreciation,
- "maintenance_capex": maintenance_capex,
- "working_capital_change": working_capital_change,
- "total_capex": abs(capex) if capex else 0
- },
+ "components": {"net_income": net_income, "depreciation": depreciation, "maintenance_capex": maintenance_capex, "working_capital_change": working_capital_change, "total_capex": abs(capex) if capex else 0},
"details": details,
}
@@ -453,40 +435,40 @@ def estimate_maintenance_capex(financial_line_items: list) -> float:
"""
if not financial_line_items:
return 0
-
+
# Approach 1: Historical average as % of revenue
capex_ratios = []
depreciation_values = []
-
+
for item in financial_line_items[:5]: # Last 5 periods
- if hasattr(item, 'capital_expenditure') and hasattr(item, 'revenue'):
+ if hasattr(item, "capital_expenditure") and hasattr(item, "revenue"):
if item.capital_expenditure and item.revenue and item.revenue > 0:
capex_ratio = abs(item.capital_expenditure) / item.revenue
capex_ratios.append(capex_ratio)
-
- if hasattr(item, 'depreciation_and_amortization') and item.depreciation_and_amortization:
+
+ if hasattr(item, "depreciation_and_amortization") and item.depreciation_and_amortization:
depreciation_values.append(item.depreciation_and_amortization)
-
+
# Approach 2: Percentage of depreciation (typically 80-120% for maintenance)
latest_depreciation = financial_line_items[0].depreciation_and_amortization if financial_line_items[0].depreciation_and_amortization else 0
-
+
# Approach 3: Industry-specific heuristics
latest_capex = abs(financial_line_items[0].capital_expenditure) if financial_line_items[0].capital_expenditure else 0
-
+
# Conservative estimate: Use the higher of:
# 1. 85% of total capex (assuming 15% is growth capex)
# 2. 100% of depreciation (replacement of worn-out assets)
# 3. Historical average if stable
-
+
method_1 = latest_capex * 0.85 # 85% of total capex
method_2 = latest_depreciation # 100% of depreciation
-
+
# If we have historical data, use average capex ratio
if len(capex_ratios) >= 3:
avg_capex_ratio = sum(capex_ratios) / len(capex_ratios)
- latest_revenue = financial_line_items[0].revenue if hasattr(financial_line_items[0], 'revenue') and financial_line_items[0].revenue else 0
+ latest_revenue = financial_line_items[0].revenue if hasattr(financial_line_items[0], "revenue") and financial_line_items[0].revenue else 0
method_3 = avg_capex_ratio * latest_revenue if latest_revenue else 0
-
+
# Use the median of the three approaches for conservatism
estimates = sorted([method_1, method_2, method_3])
return estimates[1] # Median
@@ -495,7 +477,7 @@ def estimate_maintenance_capex(financial_line_items: list) -> float:
return max(method_1, method_2)
-def calculate_intrinsic_value(financial_line_items: list) -> dict[str, any]:
+def calculate_intrinsic_value(financial_line_items: list) -> dict[str, Any]:
"""
Calculate intrinsic value using enhanced DCF with owner earnings.
Uses more sophisticated assumptions and conservative approach like Buffett.
@@ -517,21 +499,21 @@ def calculate_intrinsic_value(financial_line_items: list) -> dict[str, any]:
# Enhanced DCF with more realistic assumptions
details = []
-
+
# Estimate growth rate based on historical performance (more conservative)
historical_earnings = []
for item in financial_line_items[:5]: # Last 5 years
- if hasattr(item, 'net_income') and item.net_income:
+ if hasattr(item, "net_income") and item.net_income:
historical_earnings.append(item.net_income)
-
+
# Calculate historical growth rate
if len(historical_earnings) >= 3:
oldest_earnings = historical_earnings[-1]
latest_earnings = historical_earnings[0]
years = len(historical_earnings) - 1
-
+
if oldest_earnings > 0:
- historical_growth = ((latest_earnings / oldest_earnings) ** (1/years)) - 1
+ historical_growth = ((latest_earnings / oldest_earnings) ** (1 / years)) - 1
# Conservative adjustment - cap growth and apply haircut
historical_growth = max(-0.05, min(historical_growth, 0.15)) # Cap between -5% and 15%
conservative_growth = historical_growth * 0.7 # Apply 30% haircut for conservatism
@@ -539,33 +521,33 @@ def calculate_intrinsic_value(financial_line_items: list) -> dict[str, any]:
conservative_growth = 0.03 # Default 3% if negative base
else:
conservative_growth = 0.03 # Default conservative growth
-
+
# Buffett's conservative assumptions
stage1_growth = min(conservative_growth, 0.08) # Stage 1: cap at 8%
stage2_growth = min(conservative_growth * 0.5, 0.04) # Stage 2: half of stage 1, cap at 4%
terminal_growth = 0.025 # Long-term GDP growth rate
-
+
# Risk-adjusted discount rate based on business quality
base_discount_rate = 0.09 # Base 9%
-
+
# Adjust based on analysis scores (if available in calling context)
# For now, use conservative 10%
discount_rate = 0.10
-
+
# Three-stage DCF model
- stage1_years = 5 # High growth phase
- stage2_years = 5 # Transition phase
-
+ stage1_years = 5 # High growth phase
+ stage2_years = 5 # Transition phase
+
present_value = 0
details.append(f"Using three-stage DCF: Stage 1 ({stage1_growth:.1%}, {stage1_years}y), Stage 2 ({stage2_growth:.1%}, {stage2_years}y), Terminal ({terminal_growth:.1%})")
-
+
# Stage 1: Higher growth
stage1_pv = 0
for year in range(1, stage1_years + 1):
future_earnings = owner_earnings * (1 + stage1_growth) ** year
pv = future_earnings / (1 + discount_rate) ** year
stage1_pv += pv
-
+
# Stage 2: Transition growth
stage2_pv = 0
stage1_final_earnings = owner_earnings * (1 + stage1_growth) ** stage1_years
@@ -573,28 +555,20 @@ def calculate_intrinsic_value(financial_line_items: list) -> dict[str, any]:
future_earnings = stage1_final_earnings * (1 + stage2_growth) ** year
pv = future_earnings / (1 + discount_rate) ** (stage1_years + year)
stage2_pv += pv
-
+
# Terminal value using Gordon Growth Model
final_earnings = stage1_final_earnings * (1 + stage2_growth) ** stage2_years
terminal_earnings = final_earnings * (1 + terminal_growth)
terminal_value = terminal_earnings / (discount_rate - terminal_growth)
terminal_pv = terminal_value / (1 + discount_rate) ** (stage1_years + stage2_years)
-
+
# Total intrinsic value
intrinsic_value = stage1_pv + stage2_pv + terminal_pv
-
+
# Apply additional margin of safety (Buffett's conservatism)
conservative_intrinsic_value = intrinsic_value * 0.85 # 15% additional haircut
-
- details.extend([
- f"Stage 1 PV: ${stage1_pv:,.0f}",
- f"Stage 2 PV: ${stage2_pv:,.0f}",
- f"Terminal PV: ${terminal_pv:,.0f}",
- f"Total IV: ${intrinsic_value:,.0f}",
- f"Conservative IV (15% haircut): ${conservative_intrinsic_value:,.0f}",
- f"Owner earnings: ${owner_earnings:,.0f}",
- f"Discount rate: {discount_rate:.1%}"
- ])
+
+ details.extend([f"Stage 1 PV: ${stage1_pv:,.0f}", f"Stage 2 PV: ${stage2_pv:,.0f}", f"Terminal PV: ${terminal_pv:,.0f}", f"Total IV: ${intrinsic_value:,.0f}", f"Conservative IV (15% haircut): ${conservative_intrinsic_value:,.0f}", f"Owner earnings: ${owner_earnings:,.0f}", f"Discount rate: {discount_rate:.1%}"])
return {
"intrinsic_value": conservative_intrinsic_value,
@@ -607,39 +581,40 @@ def calculate_intrinsic_value(financial_line_items: list) -> dict[str, any]:
"discount_rate": discount_rate,
"stage1_years": stage1_years,
"stage2_years": stage2_years,
- "historical_growth": conservative_growth if 'conservative_growth' in locals() else None,
+ "historical_growth": conservative_growth if "conservative_growth" in locals() else None,
},
"details": details,
}
-def analyze_book_value_growth(financial_line_items: list) -> dict[str, any]:
+
+def analyze_book_value_growth(financial_line_items: list) -> dict[str, Any]:
"""
Analyze book value per share growth - a key Buffett metric for long-term value creation.
Buffett often talks about companies that compound book value over decades.
"""
if len(financial_line_items) < 3:
return {"score": 0, "details": "Insufficient data for book value analysis"}
-
+
score = 0
reasoning = []
-
+
# Calculate book value growth (shareholders equity / shares outstanding)
book_values = []
for item in financial_line_items:
- if hasattr(item, 'shareholders_equity') and hasattr(item, 'outstanding_shares'):
+ if hasattr(item, "shareholders_equity") and hasattr(item, "outstanding_shares"):
if item.shareholders_equity and item.outstanding_shares:
book_value_per_share = item.shareholders_equity / item.outstanding_shares
book_values.append(book_value_per_share)
-
+
if len(book_values) >= 3:
# Check for consistent book value growth
growth_periods = 0
for i in range(len(book_values) - 1):
if book_values[i] > book_values[i + 1]: # Current > Previous (reverse chronological)
growth_periods += 1
-
+
growth_rate = growth_periods / (len(book_values) - 1)
-
+
if growth_rate >= 0.8: # 80% of periods show growth
score += 3
reasoning.append("Consistent book value per share growth (Buffett's favorite metric)")
@@ -651,14 +626,14 @@ def analyze_book_value_growth(financial_line_items: list) -> dict[str, any]:
reasoning.append("Moderate book value per share growth")
else:
reasoning.append("Inconsistent book value per share growth")
-
+
# Calculate compound annual growth rate
if len(book_values) >= 2:
oldest_bv = book_values[-1]
latest_bv = book_values[0]
years = len(book_values) - 1
if oldest_bv > 0:
- cagr = ((latest_bv / oldest_bv) ** (1/years)) - 1
+ cagr = ((latest_bv / oldest_bv) ** (1 / years)) - 1
if cagr > 0.15: # 15%+ CAGR
score += 2
reasoning.append(f"Excellent book value CAGR: {cagr:.1%}")
@@ -667,35 +642,32 @@ def analyze_book_value_growth(financial_line_items: list) -> dict[str, any]:
reasoning.append(f"Good book value CAGR: {cagr:.1%}")
else:
reasoning.append("Insufficient book value data for growth analysis")
-
- return {
- "score": score,
- "details": "; ".join(reasoning)
- }
+ return {"score": score, "details": "; ".join(reasoning)}
-def analyze_pricing_power(financial_line_items: list, metrics: list) -> dict[str, any]:
+
+def analyze_pricing_power(financial_line_items: list, metrics: list) -> dict[str, Any]:
"""
Analyze pricing power - Buffett's key indicator of a business moat.
Looks at ability to raise prices without losing customers (margin expansion during inflation).
"""
if not financial_line_items or not metrics:
return {"score": 0, "details": "Insufficient data for pricing power analysis"}
-
+
score = 0
reasoning = []
-
+
# Check gross margin trends (ability to maintain/expand margins)
gross_margins = []
for item in financial_line_items:
- if hasattr(item, 'gross_margin') and item.gross_margin is not None:
+ if hasattr(item, "gross_margin") and item.gross_margin is not None:
gross_margins.append(item.gross_margin)
-
+
if len(gross_margins) >= 3:
# Check margin stability/improvement
recent_avg = sum(gross_margins[:2]) / 2 if len(gross_margins) >= 2 else gross_margins[0]
older_avg = sum(gross_margins[-2:]) / 2 if len(gross_margins) >= 2 else gross_margins[-1]
-
+
if recent_avg > older_avg + 0.02: # 2%+ improvement
score += 3
reasoning.append("Expanding gross margins indicate strong pricing power")
@@ -707,7 +679,7 @@ def analyze_pricing_power(financial_line_items: list, metrics: list) -> dict[str
reasoning.append("Stable gross margins during economic uncertainty")
else:
reasoning.append("Declining gross margins may indicate pricing pressure")
-
+
# Check if company has been able to maintain high margins consistently
if gross_margins:
avg_margin = sum(gross_margins) / len(gross_margins)
@@ -717,16 +689,13 @@ def analyze_pricing_power(financial_line_items: list, metrics: list) -> dict[str
elif avg_margin > 0.3: # 30%+ gross margins
score += 1
reasoning.append(f"Good gross margins ({avg_margin:.1%}) suggest decent pricing power")
-
- return {
- "score": score,
- "details": "; ".join(reasoning) if reasoning else "Limited pricing power analysis available"
- }
+
+ return {"score": score, "details": "; ".join(reasoning) if reasoning else "Limited pricing power analysis available"}
def generate_buffett_output(
ticker: str,
- analysis_data: dict[str, any],
+ analysis_data: dict[str, Any],
model_name: str,
model_provider: str,
) -> WarrenBuffettSignal:
diff --git a/src/config.py b/src/config.py
new file mode 100644
index 000000000..2e83e5f28
--- /dev/null
+++ b/src/config.py
@@ -0,0 +1,6 @@
+import os
+
+IS_CRYPTO: bool = os.getenv("ASSET_CLASS", "EQUITY").upper() == "CRYPTO"
+ALLOW_MARGIN: bool = os.getenv("ALLOW_MARGIN", "0") == "1"
+
+__all__ = ["IS_CRYPTO", "ALLOW_MARGIN"]
diff --git a/src/data/api.py b/src/data/api.py
new file mode 100644
index 000000000..ec76c3237
--- /dev/null
+++ b/src/data/api.py
@@ -0,0 +1,85 @@
+"""Data abstraction layer for price and token metrics."""
+
+from __future__ import annotations
+
+import datetime as _dt
+from typing import Any
+
+from src.config import IS_CRYPTO
+from src.data.cache import get_cache
+
+if IS_CRYPTO:
+ import ccxt # type: ignore
+ from pycoingecko import CoinGeckoAPI # type: ignore
+
+from src.tools.api import get_prices
+
+_cache = get_cache()
+
+
+def _parse_timestamp(ts: int) -> str:
+ return _dt.datetime.utcfromtimestamp(ts / 1000).isoformat()  # NOTE(review): utcfromtimestamp is deprecated since Py3.12 — prefer fromtimestamp(..., tz=_dt.timezone.utc); confirm downstream consumers before changing the string format
+
+
+def get_crypto_ohlcv_ccxt(pair: str, start: str, end: str, timeframe: str = "1d", exchange: str = "binance") -> list[dict[str, Any]]:
+ """Fetch OHLCV for a crypto pair via CCXT.
+
+ Args:
+ pair: Trading pair like ``"BTC/USDT"``.
+ start: ISO8601 start time.
+ end: ISO8601 end time.
+ timeframe: Bar size (e.g. ``"1h"``).
+ exchange: Exchange id compatible with CCXT.
+
+ Returns:
+ List of OHLCV dictionaries.
+ """
+ ex = getattr(ccxt, exchange)() # type: ignore[attr-defined]
+ since = ex.parse8601(start)
+ end_ms = ex.parse8601(end)
+ all_bars: list[list[Any]] = []
+ limit = 1000
+ while since < end_ms:
+ data = ex.fetch_ohlcv(pair, timeframe=timeframe, since=since, limit=limit)
+ if not data:
+ break
+ all_bars.extend(data)
+ since = data[-1][0] + ex.parse_timeframe(timeframe) * 1000
+ if len(data) < limit:
+ break
+ bars = [b for b in all_bars if b[0] <= end_ms]
+ return [
+ {
+ "open": b[1],
+ "high": b[2],
+ "low": b[3],
+ "close": b[4],
+ "volume": b[5],
+ "time": _parse_timestamp(b[0]),
+ }
+ for b in bars
+ ]
+
+
+def get_token_metrics(id_or_symbol: str) -> dict[str, Any]:
+ """Fetch token metrics from CoinGecko."""
+ cg = CoinGeckoAPI()
+ try:
+ data = cg.get_coin_by_id(id_or_symbol)
+ except Exception:
+ data = cg.get_coin_by_id(id_or_symbol.lower())
+ return data
+
+
+def get_price_ohlcv(symbol_or_pair: str, start: str, end: str, timeframe: str = "1d", exchange: str = "binance") -> list[dict[str, Any]]:
+ """Unified price fetcher. Routes to Yahoo or CCXT based on ``config.IS_CRYPTO``."""
+ if IS_CRYPTO:
+ cache_key = f"{symbol_or_pair}_{exchange}_{timeframe}_{start}_{end}"
+ if cached := _cache.get_prices(cache_key):
+ return cached
+ bars = get_crypto_ohlcv_ccxt(symbol_or_pair, start, end, timeframe, exchange)
+ _cache.set_prices(cache_key, bars)
+ return bars
+ else:
+ prices = get_prices(symbol_or_pair, start, end)
+ return [p.model_dump() for p in prices]
diff --git a/src/data/cache.py b/src/data/cache.py
index 4127934e3..0e68e32f4 100644
--- a/src/data/cache.py
+++ b/src/data/cache.py
@@ -1,12 +1,15 @@
+from typing import Any
+
+
class Cache:
"""In-memory cache for API responses."""
def __init__(self):
- self._prices_cache: dict[str, list[dict[str, any]]] = {}
- self._financial_metrics_cache: dict[str, list[dict[str, any]]] = {}
- self._line_items_cache: dict[str, list[dict[str, any]]] = {}
- self._insider_trades_cache: dict[str, list[dict[str, any]]] = {}
- self._company_news_cache: dict[str, list[dict[str, any]]] = {}
+ self._prices_cache: dict[str, list[dict[str, Any]]] = {}
+ self._financial_metrics_cache: dict[str, list[dict[str, Any]]] = {}
+ self._line_items_cache: dict[str, list[dict[str, Any]]] = {}
+ self._insider_trades_cache: dict[str, list[dict[str, Any]]] = {}
+ self._company_news_cache: dict[str, list[dict[str, Any]]] = {}
def _merge_data(self, existing: list[dict] | None, new_data: list[dict], key_field: str) -> list[dict]:
"""Merge existing and new data, avoiding duplicates based on a key field."""
@@ -21,43 +24,43 @@ def _merge_data(self, existing: list[dict] | None, new_data: list[dict], key_fie
merged.extend([item for item in new_data if item[key_field] not in existing_keys])
return merged
- def get_prices(self, ticker: str) -> list[dict[str, any]] | None:
+ def get_prices(self, ticker: str) -> list[dict[str, Any]] | None:
"""Get cached price data if available."""
return self._prices_cache.get(ticker)
- def set_prices(self, ticker: str, data: list[dict[str, any]]):
+ def set_prices(self, ticker: str, data: list[dict[str, Any]]):
"""Append new price data to cache."""
self._prices_cache[ticker] = self._merge_data(self._prices_cache.get(ticker), data, key_field="time")
- def get_financial_metrics(self, ticker: str) -> list[dict[str, any]]:
+ def get_financial_metrics(self, ticker: str) -> list[dict[str, Any]] | None:
"""Get cached financial metrics if available."""
return self._financial_metrics_cache.get(ticker)
- def set_financial_metrics(self, ticker: str, data: list[dict[str, any]]):
+ def set_financial_metrics(self, ticker: str, data: list[dict[str, Any]]):
"""Append new financial metrics to cache."""
self._financial_metrics_cache[ticker] = self._merge_data(self._financial_metrics_cache.get(ticker), data, key_field="report_period")
- def get_line_items(self, ticker: str) -> list[dict[str, any]] | None:
+ def get_line_items(self, ticker: str) -> list[dict[str, Any]] | None:
"""Get cached line items if available."""
return self._line_items_cache.get(ticker)
- def set_line_items(self, ticker: str, data: list[dict[str, any]]):
+ def set_line_items(self, ticker: str, data: list[dict[str, Any]]):
"""Append new line items to cache."""
self._line_items_cache[ticker] = self._merge_data(self._line_items_cache.get(ticker), data, key_field="report_period")
- def get_insider_trades(self, ticker: str) -> list[dict[str, any]] | None:
+ def get_insider_trades(self, ticker: str) -> list[dict[str, Any]] | None:
"""Get cached insider trades if available."""
return self._insider_trades_cache.get(ticker)
- def set_insider_trades(self, ticker: str, data: list[dict[str, any]]):
+ def set_insider_trades(self, ticker: str, data: list[dict[str, Any]]):
"""Append new insider trades to cache."""
self._insider_trades_cache[ticker] = self._merge_data(self._insider_trades_cache.get(ticker), data, key_field="filing_date") # Could also use transaction_date if preferred
- def get_company_news(self, ticker: str) -> list[dict[str, any]] | None:
+ def get_company_news(self, ticker: str) -> list[dict[str, Any]] | None:
"""Get cached company news if available."""
return self._company_news_cache.get(ticker)
- def set_company_news(self, ticker: str, data: list[dict[str, any]]):
+ def set_company_news(self, ticker: str, data: list[dict[str, Any]]):
"""Append new company news to cache."""
self._company_news_cache[ticker] = self._merge_data(self._company_news_cache.get(ticker), data, key_field="date")
diff --git a/src/graph/state.py b/src/graph/state.py
index f8dc42216..e6a18d8e8 100644
--- a/src/graph/state.py
+++ b/src/graph/state.py
@@ -1,21 +1,20 @@
-from typing_extensions import Annotated, Sequence, TypedDict
-
+import json
import operator
-from langchain_core.messages import BaseMessage
+from typing import Any
-
-import json
+from langchain_core.messages import BaseMessage
+from typing_extensions import Annotated, Sequence, TypedDict
-def merge_dicts(a: dict[str, any], b: dict[str, any]) -> dict[str, any]:
+def merge_dicts(a: dict[str, Any], b: dict[str, Any]) -> dict[str, Any]:
return {**a, **b}
# Define agent state
class AgentState(TypedDict):
messages: Annotated[Sequence[BaseMessage], operator.add]
- data: Annotated[dict[str, any], merge_dicts]
- metadata: Annotated[dict[str, any], merge_dicts]
+ data: Annotated[dict[str, Any], merge_dicts]
+ metadata: Annotated[dict[str, Any], merge_dicts]
def show_agent_reasoning(output, agent_name):
diff --git a/src/main.py b/src/main.py
index 230695ad0..c19e4ceb4 100644
--- a/src/main.py
+++ b/src/main.py
@@ -18,6 +18,7 @@
from datetime import datetime
from dateutil.relativedelta import relativedelta
from src.utils.visualize import save_graph_as_png
+from src.utils.parsing import parse_hedge_fund_response
import json
# Load environment variables from .env file
@@ -26,21 +27,6 @@
init(autoreset=True)
-def parse_hedge_fund_response(response):
- """Parses a JSON string and returns a dictionary."""
- try:
- return json.loads(response)
- except json.JSONDecodeError as e:
- print(f"JSON decoding error: {e}\nResponse: {repr(response)}")
- return None
- except TypeError as e:
- print(f"Invalid response type (expected string, got {type(response).__name__}): {e}")
- return None
- except Exception as e:
- print(f"Unexpected error while parsing response: {e}\nResponse: {repr(response)}")
- return None
-
-
##### Run the Hedge Fund #####
def run_hedge_fund(
tickers: list[str],
diff --git a/src/utils/analysts.py b/src/utils/analysts.py
index 6fd47f399..dd0a0c63f 100644
--- a/src/utils/analysts.py
+++ b/src/utils/analysts.py
@@ -5,16 +5,20 @@
from src.agents.bill_ackman import bill_ackman_agent
from src.agents.cathie_wood import cathie_wood_agent
from src.agents.charlie_munger import charlie_munger_agent
+from src.agents.crypto_sentiment_analyst import crypto_sentiment_analyst_agent
from src.agents.fundamentals import fundamentals_analyst_agent
from src.agents.michael_burry import michael_burry_agent
-from src.agents.phil_fisher import phil_fisher_agent
+from src.agents.on_chain_analyst import on_chain_analyst_agent
from src.agents.peter_lynch import peter_lynch_agent
+from src.agents.phil_fisher import phil_fisher_agent
+from src.agents.rakesh_jhunjhunwala import rakesh_jhunjhunwala_agent
from src.agents.sentiment import sentiment_analyst_agent
from src.agents.stanley_druckenmiller import stanley_druckenmiller_agent
from src.agents.technicals import technical_analyst_agent
+from src.agents.tokenomics_analyst import tokenomics_analyst_agent
from src.agents.valuation import valuation_analyst_agent
from src.agents.warren_buffett import warren_buffett_agent
-from src.agents.rakesh_jhunjhunwala import rakesh_jhunjhunwala_agent
+from src.config import IS_CRYPTO
# Define analyst configuration - single source of truth
ANALYST_CONFIG = {
@@ -95,6 +99,27 @@
},
}
+if IS_CRYPTO:
+ ANALYST_CONFIG.update(
+ {
+ "on_chain_analyst": {
+ "display_name": "On-Chain Analyst",
+ "agent_func": on_chain_analyst_agent,
+ "order": 15,
+ },
+ "tokenomics_analyst": {
+ "display_name": "Tokenomics Analyst",
+ "agent_func": tokenomics_analyst_agent,
+ "order": 16,
+ },
+ "crypto_sentiment_analyst": {
+ "display_name": "Crypto Sentiment Analyst",
+ "agent_func": crypto_sentiment_analyst_agent,
+ "order": 17,
+ },
+ }
+ )
+
# Derive ANALYST_ORDER from ANALYST_CONFIG for backwards compatibility
ANALYST_ORDER = [(config["display_name"], key) for key, config in sorted(ANALYST_CONFIG.items(), key=lambda x: x[1]["order"])]
diff --git a/src/utils/display.py b/src/utils/display.py
index 0ffb5a270..155b97a72 100644
--- a/src/utils/display.py
+++ b/src/utils/display.py
@@ -1,8 +1,11 @@
+import json
+import os
+from typing import Any
+
from colorama import Fore, Style
from tabulate import tabulate
+
from .analysts import ANALYST_ORDER
-import os
-import json
def sort_agent_signals(signals):
@@ -36,7 +39,7 @@ def print_trading_output(result: dict) -> None:
for agent, signals in result.get("analyst_signals", {}).items():
if ticker not in signals:
continue
-
+
# Skip Risk Management agent in the signals section
if agent == "risk_management_agent":
continue
@@ -51,12 +54,12 @@ def print_trading_output(result: dict) -> None:
"BEARISH": Fore.RED,
"NEUTRAL": Fore.YELLOW,
}.get(signal_type, Fore.WHITE)
-
+
# Get reasoning if available
reasoning_str = ""
if "reasoning" in signal and signal["reasoning"]:
reasoning = signal["reasoning"]
-
+
# Handle different types of reasoning (string, dict, etc.)
if isinstance(reasoning, str):
reasoning_str = reasoning
@@ -66,7 +69,7 @@ def print_trading_output(result: dict) -> None:
else:
# Convert any other type to string
reasoning_str = str(reasoning)
-
+
# Wrap long reasoning text to make it more readable
wrapped_reasoning = ""
current_line = ""
@@ -83,7 +86,7 @@ def print_trading_output(result: dict) -> None:
current_line = word
if current_line:
wrapped_reasoning += current_line
-
+
reasoning_str = wrapped_reasoning
table_data.append(
@@ -147,21 +150,21 @@ def print_trading_output(result: dict) -> None:
],
["Reasoning", f"{Fore.WHITE}{wrapped_reasoning}{Style.RESET_ALL}"],
]
-
+
print(f"\n{Fore.WHITE}{Style.BRIGHT}TRADING DECISION:{Style.RESET_ALL} [{Fore.CYAN}{ticker}{Style.RESET_ALL}]")
print(tabulate(decision_data, tablefmt="grid", colalign=("left", "left")))
# Print Portfolio Summary
print(f"\n{Fore.WHITE}{Style.BRIGHT}PORTFOLIO SUMMARY:{Style.RESET_ALL}")
portfolio_data = []
-
+
# Extract portfolio manager reasoning (common for all tickers)
portfolio_manager_reasoning = None
for ticker, decision in decisions.items():
if decision.get("reasoning"):
portfolio_manager_reasoning = decision.get("reasoning")
break
-
+
for ticker, decision in decisions.items():
action = decision.get("action", "").upper()
action_color = {
@@ -181,7 +184,7 @@ def print_trading_output(result: dict) -> None:
)
headers = [f"{Fore.WHITE}Ticker", "Action", "Quantity", "Confidence"]
-
+
# Print the portfolio summary table
print(
tabulate(
@@ -191,7 +194,7 @@ def print_trading_output(result: dict) -> None:
colalign=("left", "center", "right", "right"),
)
)
-
+
# Print Portfolio Manager's reasoning if available
if portfolio_manager_reasoning:
# Handle different types of reasoning (string, dict, etc.)
@@ -204,7 +207,7 @@ def print_trading_output(result: dict) -> None:
else:
# Convert any other type to string
reasoning_str = str(portfolio_manager_reasoning)
-
+
# Wrap long reasoning text to make it more readable
wrapped_reasoning = ""
current_line = ""
@@ -221,7 +224,7 @@ def print_trading_output(result: dict) -> None:
current_line = word
if current_line:
wrapped_reasoning += current_line
-
+
print(f"\n{Fore.WHITE}{Style.BRIGHT}Portfolio Strategy:{Style.RESET_ALL}")
print(f"{Fore.CYAN}{wrapped_reasoning}{Style.RESET_ALL}")
@@ -241,7 +244,6 @@ def print_backtest_results(table_rows: list) -> None:
else:
ticker_rows.append(row)
-
# Display latest portfolio summary
if summary_rows:
latest_summary = summary_rows[-1]
@@ -256,7 +258,7 @@ def print_backtest_results(table_rows: list) -> None:
print(f"Total Position Value: {Fore.YELLOW}${float(position_str):,.2f}{Style.RESET_ALL}")
print(f"Total Value: {Fore.WHITE}${float(total_str):,.2f}{Style.RESET_ALL}")
print(f"Return: {latest_summary[9]}")
-
+
# Display performance metrics if available
if latest_summary[10]: # Sharpe ratio
print(f"Sharpe Ratio: {latest_summary[10]}")
@@ -323,7 +325,7 @@ def format_backtest_row(
sharpe_ratio: float = None,
sortino_ratio: float = None,
max_drawdown: float = None,
-) -> list[any]:
+) -> list[Any]:
"""Format a row for the backtest results table"""
# Color the action
action_color = {
diff --git a/src/utils/ollama.py b/src/utils/ollama.py
index 5af70ee44..c62eee779 100644
--- a/src/utils/ollama.py
+++ b/src/utils/ollama.py
@@ -70,10 +70,8 @@ def start_ollama_server() -> bool:
system = platform.system().lower()
try:
- if system == "darwin" or system == "linux": # macOS or Linux
+ if system in ("darwin", "linux", "windows"):
subprocess.Popen(["ollama", "serve"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- elif system == "windows": # Windows
- subprocess.Popen(["ollama", "serve"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
else:
print(f"{Fore.RED}Unsupported operating system: {system}{Style.RESET_ALL}")
return False
diff --git a/src/utils/parsing.py b/src/utils/parsing.py
new file mode 100644
index 000000000..dfba4f29f
--- /dev/null
+++ b/src/utils/parsing.py
@@ -0,0 +1,29 @@
+"""Utility functions for parsing model responses."""
+import json
+from typing import Any, Optional
+
+
+def parse_hedge_fund_response(response: str) -> Optional[dict[str, Any]]:
+ """Parse a hedge fund JSON response safely.
+
+ Args:
+ response: The raw JSON string returned by the agent.
+
+ Returns:
+ A dictionary representation of the JSON response or ``None`` if parsing
+ fails.
+ """
+ try:
+ return json.loads(response)
+ except json.JSONDecodeError as e:
+ print(f"JSON decoding error: {e}\nResponse: {repr(response)}")
+ return None
+ except TypeError as e:
+ print(
+ f"Invalid response type (expected string, got {type(response).__name__}): {e}"
+ )
+ return None
+ except Exception as e: # noqa: BLE001
+ print(f"Unexpected error while parsing response: {e}\nResponse: {repr(response)}")
+ return None
+
diff --git a/tests/test_crypto_pipeline.py b/tests/test_crypto_pipeline.py
new file mode 100644
index 000000000..bb090ae23
--- /dev/null
+++ b/tests/test_crypto_pipeline.py
@@ -0,0 +1,12 @@
+import os
+import sys
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+import pytest
+
+pytest.importorskip("langchain_core", reason="langchain not installed")
+from app.backend.services.graph import run_graph
+
+
+def test_crypto_smoke():
+ run_graph(pair="BTC/USDT", exchange="binance", timeframe="1h")  # NOTE(review): hits a live exchange via CCXT — consider mocking or marking network/slow so CI stays deterministic