diff --git a/app/backend/models/schemas.py b/app/backend/models/schemas.py index 966732d07..eb7994b91 100644 --- a/app/backend/models/schemas.py +++ b/app/backend/models/schemas.py @@ -65,6 +65,7 @@ class BaseHedgeFundRequest(BaseModel): agent_models: Optional[List[AgentModelConfig]] = None model_name: Optional[str] = "gpt-4.1" model_provider: Optional[ModelProvider] = ModelProvider.OPENAI + data_provider: Optional[str] = "yfinance" # Default to free option margin_requirement: float = 0.0 portfolio_positions: Optional[List[PortfolioPosition]] = None api_keys: Optional[Dict[str, str]] = None diff --git a/app/backend/services/graph.py b/app/backend/services/graph.py index 958c316b3..0a91e1939 100644 --- a/app/backend/services/graph.py +++ b/app/backend/services/graph.py @@ -129,12 +129,12 @@ def create_graph(graph_nodes: list, graph_edges: list) -> StateGraph: return graph -async def run_graph_async(graph, portfolio, tickers, start_date, end_date, model_name, model_provider, request=None): +async def run_graph_async(graph, portfolio, tickers, start_date, end_date, model_name, model_provider, data_provider="yfinance", request=None): """Async wrapper for run_graph to work with asyncio.""" # Use run_in_executor to run the synchronous function in a separate thread # so it doesn't block the event loop loop = asyncio.get_running_loop() - result = await loop.run_in_executor(None, lambda: run_graph(graph, portfolio, tickers, start_date, end_date, model_name, model_provider, request)) # Use default executor + result = await loop.run_in_executor(None, lambda: run_graph(graph, portfolio, tickers, start_date, end_date, model_name, model_provider, data_provider, request)) # Use default executor return result @@ -146,6 +146,7 @@ def run_graph( end_date: str, model_name: str, model_provider: str, + data_provider: str = "yfinance", request=None, ) -> dict: """ @@ -171,6 +172,7 @@ def run_graph( "show_reasoning": False, "model_name": model_name, "model_provider": model_provider, + 
"data_provider": data_provider, "request": request, # Pass the request for agent-specific model access }, }, diff --git a/poetry.lock b/poetry.lock index c1c5cfdb8..f4bd3adbf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. [[package]] name = "alembic" @@ -6,6 +6,7 @@ version = "1.15.2" description = "A database migration tool for SQLAlchemy." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "alembic-1.15.2-py3-none-any.whl", hash = "sha256:2e76bd916d547f6900ec4bb5a90aeac1485d2c92536923d0b138c02b126edc53"}, {file = "alembic-1.15.2.tar.gz", hash = "sha256:1c72391bbdeffccfe317eefba686cb9a3c078005478885413b95c3b26c57a8a7"}, @@ -25,6 +26,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -36,6 +38,7 @@ version = "0.50.0" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "anthropic-0.50.0-py3-none-any.whl", hash = "sha256:defbd79327ca2fa61fd7b9eb2f1627dfb1f69c25d49288c52e167ddb84574f80"}, {file = "anthropic-0.50.0.tar.gz", hash = "sha256:42175ec04ce4ff2fa37cd436710206aadff546ee99d70d974699f59b49adc66f"}, @@ -60,6 +63,7 @@ version = "3.7.1" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "anyio-3.7.1-py3-none-any.whl", hash = 
"sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, @@ -71,15 +75,39 @@ sniffio = ">=1.1" [package.extras] doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] -test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4) ; python_version < \"3.8\"", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; python_version < \"3.12\" and platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (<0.22)"] +[[package]] +name = "beautifulsoup4" +version = "4.13.4" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.7.0" +groups = ["main"] +files = [ + {file = "beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"}, + {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"}, +] + +[package.dependencies] +soupsieve = ">1.2" +typing-extensions = ">=4.0.0" + +[package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] +html5lib = ["html5lib"] +lxml = ["lxml"] + [[package]] name = "black" version = "23.12.1" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2"}, {file = "black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba"}, @@ -114,7 +142,7 @@ platformdirs = ">=2" [package.extras] colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +d = ["aiohttp (>=3.7.4) ; sys_platform != \"win32\" or implementation_name != \"pypy\"", "aiohttp (>=3.7.4,!=3.9.0) ; sys_platform == \"win32\" and implementation_name == \"pypy\""] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] @@ -124,6 +152,7 @@ version = "5.5.2" description = "Extensible memoizing collections and decorators" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a"}, {file = "cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4"}, @@ -135,6 +164,7 @@ version = "2025.4.26" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, @@ -146,6 +176,7 @@ version = "1.17.1" description = "Foreign Function Interface for Python calling C code." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, @@ -225,6 +256,7 @@ version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -326,6 +358,7 @@ version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, @@ -340,6 +373,7 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -351,6 +385,7 @@ version = "1.3.2" description = "Python library for calculating contours of 2D quadrilateral grids" optional = false python-versions = ">=3.10" +groups = ["main"] files = [ {file = "contourpy-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba38e3f9f330af820c4b27ceb4b9c7feee5fe0493ea53a8720f4792667465934"}, {file = "contourpy-1.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc41ba0714aa2968d1f8674ec97504a8f7e334f48eeacebcaa6256213acb0989"}, @@ -421,12 +456,43 @@ mypy = ["bokeh", "contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.15.0)", " test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] +[[package]] +name = "curl-cffi" +version = "0.13.0" +description = "libcurl ffi bindings for Python, with impersonation support." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "curl_cffi-0.13.0-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:434cadbe8df2f08b2fc2c16dff2779fb40b984af99c06aa700af898e185bb9db"}, + {file = "curl_cffi-0.13.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:59afa877a9ae09efa04646a7d068eeea48915a95d9add0a29854e7781679fcd7"}, + {file = "curl_cffi-0.13.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d06ed389e45a7ca97b17c275dbedd3d6524560270e675c720e93a2018a766076"}, + {file = "curl_cffi-0.13.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4e0de45ab3b7a835c72bd53640c2347415111b43421b5c7a1a0b18deae2e541"}, + {file = "curl_cffi-0.13.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8eb4083371bbb94e9470d782de235fb5268bf43520de020c9e5e6be8f395443f"}, + {file = "curl_cffi-0.13.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:28911b526e8cd4aa0e5e38401bfe6887e8093907272f1f67ca22e6beb2933a51"}, + {file = "curl_cffi-0.13.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6d433ffcb455ab01dd0d7bde47109083aa38b59863aa183d29c668ae4c96bf8e"}, + {file = "curl_cffi-0.13.0-cp39-abi3-win_amd64.whl", hash = "sha256:66a6b75ce971de9af64f1b6812e275f60b88880577bac47ef1fa19694fa21cd3"}, + {file = "curl_cffi-0.13.0-cp39-abi3-win_arm64.whl", hash = "sha256:d438a3b45244e874794bc4081dc1e356d2bb926dcc7021e5a8fef2e2105ef1d8"}, + {file = "curl_cffi-0.13.0.tar.gz", hash = "sha256:62ecd90a382bd5023750e3606e0aa7cb1a3a8ba41c14270b8e5e149ebf72c5ca"}, +] + +[package.dependencies] +certifi = ">=2024.2.2" +cffi = ">=1.12.0" + +[package.extras] +build = ["cibuildwheel", "wheel"] +dev = ["charset_normalizer (>=3.3.2,<4.0)", "coverage (>=6.4.1,<7.0)", "cryptography (>=42.0.5,<43.0)", "httpx (==0.23.1)", "mypy (>=1.9.0,<2.0)", "pytest (>=8.1.1,<9.0)", "pytest-asyncio (>=0.23.6,<1.0)", "pytest-trio (>=0.8.0,<1.0)", "ruff (>=0.3.5,<1.0)", "trio (>=0.25.0,<1.0)", "trustme 
(>=1.1.0,<2.0)", "typing_extensions", "uvicorn (>=0.29.0,<1.0)", "websockets (>=12.0,<13.0)"] +extra = ["lxml_html_clean", "markdownify (>=1.1.0)", "readability-lxml (>=0.8.1)"] +test = ["charset_normalizer (>=3.3.2,<4.0)", "cryptography (>=42.0.5,<43.0)", "fastapi (==0.110.0)", "httpx (==0.23.1)", "proxy.py (>=2.4.3,<3.0)", "pytest (>=8.1.1,<9.0)", "pytest-asyncio (>=0.23.6,<1.0)", "pytest-trio (>=0.8.0,<1.0)", "python-multipart (>=0.0.9,<1.0)", "trio (>=0.25.0,<1.0)", "trustme (>=1.1.0,<2.0)", "typing_extensions", "uvicorn (>=0.29.0,<1.0)", "websockets (>=12.0,<13.0)"] + [[package]] name = "cycler" version = "0.12.1" description = "Composable style cycles" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, @@ -442,6 +508,7 @@ version = "0.7.1" description = "XML bomb protection for Python stdlib modules" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["main"] files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -453,6 +520,7 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -464,6 +532,7 @@ version = "0.104.1" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "fastapi-0.104.1-py3-none-any.whl", hash = "sha256:752dc31160cdbd0436bb93bad51560b57e525cbb1d4bbf6f4904ceee75548241"}, {file = "fastapi-0.104.1.tar.gz", hash = "sha256:e5e4540a7c5e1dcfbbcf5b903c234feddcdcd881f191977a1c5dfd917487e7ae"}, @@ -484,6 +553,7 @@ version = "0.0.7" description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 🚀" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "fastapi_cli-0.0.7-py3-none-any.whl", hash = "sha256:d549368ff584b2804336c61f192d86ddea080c11255f375959627911944804f4"}, {file = "fastapi_cli-0.0.7.tar.gz", hash = "sha256:02b3b65956f526412515907a0793c9094abd4bfb5457b389f645b0ea6ba3605e"}, @@ -503,6 +573,7 @@ version = "1.2.0" description = "Infer file type and MIME type of any file/buffer. No external dependencies." optional = false python-versions = "*" +groups = ["main"] files = [ {file = "filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25"}, {file = "filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb"}, @@ -514,6 +585,7 @@ version = "6.1.0" description = "the modular source code checker: pep8 pyflakes and co" optional = false python-versions = ">=3.8.1" +groups = ["dev"] files = [ {file = "flake8-6.1.0-py2.py3-none-any.whl", hash = "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"}, {file = "flake8-6.1.0.tar.gz", hash = "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23"}, @@ -530,6 +602,7 @@ version = "4.57.0" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "fonttools-4.57.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:babe8d1eb059a53e560e7bf29f8e8f4accc8b6cfb9b5fd10e485bde77e71ef41"}, {file = "fonttools-4.57.0-cp310-cp310-macosx_10_9_x86_64.whl", 
hash = "sha256:81aa97669cd726349eb7bd43ca540cf418b279ee3caba5e2e295fb4e8f841c02"}, @@ -584,18 +657,83 @@ files = [ ] [package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0) ; python_version <= \"3.12\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"] graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "pycairo", "scipy"] +interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""] lxml = ["lxml (>=4.0)"] pathops = ["skia-pathops (>=0.5.0)"] plot = ["matplotlib"] repacker = ["uharfbuzz (>=0.23.0)"] symfont = ["sympy"] -type1 = ["xattr"] +type1 = ["xattr ; sys_platform == \"darwin\""] ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.1.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] +unicode = ["unicodedata2 (>=15.1.0) ; python_version <= \"3.12\""] +woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"] + +[[package]] +name = "frozendict" +version = "2.4.6" +description = "A simple immutable dictionary" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "frozendict-2.4.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:c3a05c0a50cab96b4bb0ea25aa752efbfceed5ccb24c007612bc63e51299336f"}, + {file = "frozendict-2.4.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f5b94d5b07c00986f9e37a38dd83c13f5fe3bf3f1ccc8e88edea8fe15d6cd88c"}, + {file = "frozendict-2.4.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4c789fd70879ccb6289a603cdebdc4953e7e5dea047d30c1b180529b28257b5"}, + {file = "frozendict-2.4.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da6a10164c8a50b34b9ab508a9420df38f4edf286b9ca7b7df8a91767baecb34"}, + {file = "frozendict-2.4.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9a8a43036754a941601635ea9c788ebd7a7efbed2becba01b54a887b41b175b9"}, + {file = "frozendict-2.4.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c9905dcf7aa659e6a11b8051114c9fa76dfde3a6e50e6dc129d5aece75b449a2"}, + {file = "frozendict-2.4.6-cp310-cp310-win_amd64.whl", hash = "sha256:323f1b674a2cc18f86ab81698e22aba8145d7a755e0ac2cccf142ee2db58620d"}, + {file = "frozendict-2.4.6-cp310-cp310-win_arm64.whl", hash = "sha256:eabd21d8e5db0c58b60d26b4bb9839cac13132e88277e1376970172a85ee04b3"}, + {file = "frozendict-2.4.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eddabeb769fab1e122d3a6872982c78179b5bcc909fdc769f3cf1964f55a6d20"}, + {file = "frozendict-2.4.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:377a65be0a700188fc21e669c07de60f4f6d35fae8071c292b7df04776a1c27b"}, + {file = "frozendict-2.4.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce1e9217b85eec6ba9560d520d5089c82dbb15f977906eb345d81459723dd7e3"}, + {file = "frozendict-2.4.6-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:7291abacf51798d5ffe632771a69c14fb423ab98d63c4ccd1aa382619afe2f89"}, + {file = "frozendict-2.4.6-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:e72fb86e48811957d66ffb3e95580af7b1af1e6fbd760ad63d7bd79b2c9a07f8"}, + {file = "frozendict-2.4.6-cp36-cp36m-win_amd64.whl", hash = 
"sha256:622301b1c29c4f9bba633667d592a3a2b093cb408ba3ce578b8901ace3931ef3"}, + {file = "frozendict-2.4.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a4e3737cb99ed03200cd303bdcd5514c9f34b29ee48f405c1184141bd68611c9"}, + {file = "frozendict-2.4.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49ffaf09241bc1417daa19362a2241a4aa435f758fd4375c39ce9790443a39cd"}, + {file = "frozendict-2.4.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d69418479bfb834ba75b0e764f058af46ceee3d655deb6a0dd0c0c1a5e82f09"}, + {file = "frozendict-2.4.6-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:c131f10c4d3906866454c4e89b87a7e0027d533cce8f4652aa5255112c4d6677"}, + {file = "frozendict-2.4.6-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:fc67cbb3c96af7a798fab53d52589752c1673027e516b702ab355510ddf6bdff"}, + {file = "frozendict-2.4.6-cp37-cp37m-win_amd64.whl", hash = "sha256:7730f8ebe791d147a1586cbf6a42629351d4597773317002181b66a2da0d509e"}, + {file = "frozendict-2.4.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:807862e14b0e9665042458fde692c4431d660c4219b9bb240817f5b918182222"}, + {file = "frozendict-2.4.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9647c74efe3d845faa666d4853cfeabbaee403b53270cabfc635b321f770e6b8"}, + {file = "frozendict-2.4.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:665fad3f0f815aa41294e561d98dbedba4b483b3968e7e8cab7d728d64b96e33"}, + {file = "frozendict-2.4.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f42e6b75254ea2afe428ad6d095b62f95a7ae6d4f8272f0bd44a25dddd20f67"}, + {file = "frozendict-2.4.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:02331541611f3897f260900a1815b63389654951126e6e65545e529b63c08361"}, + {file = "frozendict-2.4.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:18d50a2598350b89189da9150058191f55057581e40533e470db46c942373acf"}, + {file = "frozendict-2.4.6-cp38-cp38-win_amd64.whl", hash = 
"sha256:1b4a3f8f6dd51bee74a50995c39b5a606b612847862203dd5483b9cd91b0d36a"}, + {file = "frozendict-2.4.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a76cee5c4be2a5d1ff063188232fffcce05dde6fd5edd6afe7b75b247526490e"}, + {file = "frozendict-2.4.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba5ef7328706db857a2bdb2c2a17b4cd37c32a19c017cff1bb7eeebc86b0f411"}, + {file = "frozendict-2.4.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:669237c571856be575eca28a69e92a3d18f8490511eff184937283dc6093bd67"}, + {file = "frozendict-2.4.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0aaa11e7c472150efe65adbcd6c17ac0f586896096ab3963775e1c5c58ac0098"}, + {file = "frozendict-2.4.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b8f2829048f29fe115da4a60409be2130e69402e29029339663fac39c90e6e2b"}, + {file = "frozendict-2.4.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:94321e646cc39bebc66954a31edd1847d3a2a3483cf52ff051cd0996e7db07db"}, + {file = "frozendict-2.4.6-cp39-cp39-win_amd64.whl", hash = "sha256:74b6b26c15dddfefddeb89813e455b00ebf78d0a3662b89506b4d55c6445a9f4"}, + {file = "frozendict-2.4.6-cp39-cp39-win_arm64.whl", hash = "sha256:7088102345d1606450bd1801a61139bbaa2cb0d805b9b692f8d81918ea835da6"}, + {file = "frozendict-2.4.6-py311-none-any.whl", hash = "sha256:d065db6a44db2e2375c23eac816f1a022feb2fa98cbb50df44a9e83700accbea"}, + {file = "frozendict-2.4.6-py312-none-any.whl", hash = "sha256:49344abe90fb75f0f9fdefe6d4ef6d4894e640fadab71f11009d52ad97f370b9"}, + {file = "frozendict-2.4.6-py313-none-any.whl", hash = "sha256:7134a2bb95d4a16556bb5f2b9736dceb6ea848fa5b6f3f6c2d6dba93b44b4757"}, + {file = "frozendict-2.4.6.tar.gz", hash = "sha256:df7cd16470fbd26fc4969a208efadc46319334eb97def1ddf48919b351192b8e"}, +] + +[[package]] +name = "gigachat" +version = "0.1.42.post1" +description = "GigaChat. 
Python-library for GigaChain and LangChain" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "gigachat-0.1.42.post1-py3-none-any.whl", hash = "sha256:abd0e6623e179c39a51fcbd75471c3011449c1d6451dbbdbb847d29e98988902"}, + {file = "gigachat-0.1.42.post1.tar.gz", hash = "sha256:bd0ffddc5c0fcee8deede84181fbce9b4c4a4f536ddcf5140016cecc83d92044"}, +] + +[package.dependencies] +httpx = "<1" +pydantic = ">=1" [[package]] name = "google-ai-generativelanguage" @@ -603,6 +741,7 @@ version = "0.6.18" description = "Google Ai Generativelanguage API client library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "google_ai_generativelanguage-0.6.18-py3-none-any.whl", hash = "sha256:13d8174fea90b633f520789d32df7b422058fd5883b022989c349f1017db7fcf"}, {file = "google_ai_generativelanguage-0.6.18.tar.gz", hash = "sha256:274ba9fcf69466ff64e971d565884434388e523300afd468fc8e3033cd8e606e"}, @@ -612,8 +751,8 @@ files = [ google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0", extras = ["grpc"]} google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0" proto-plus = [ - {version = ">=1.22.3,<2.0.0", markers = "python_version < \"3.13\""}, {version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""}, + {version = ">=1.22.3,<2.0.0", markers = "python_version < \"3.13\""}, ] protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" @@ -623,6 +762,7 @@ version = "2.24.2" description = "Google API client core library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "google_api_core-2.24.2-py3-none-any.whl", hash = "sha256:810a63ac95f3c441b7c0e43d344e372887f62ce9071ba972eacf32672e072de9"}, {file = "google_api_core-2.24.2.tar.gz", hash = "sha256:81718493daf06d96d6bc76a91c23874dbf2fac0adbbf542831b805ee6e974696"}, @@ -634,15 +774,15 @@ googleapis-common-protos = 
">=1.56.2,<2.0.0" grpcio = {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""} grpcio-status = {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""} proto-plus = [ - {version = ">=1.22.3,<2.0.0", markers = "python_version < \"3.13\""}, {version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""}, + {version = ">=1.22.3,<2.0.0"}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" requests = ">=2.18.0,<3.0.0" [package.extras] async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.dev0)"] -grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev) ; python_version >= \"3.11\"", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0) ; python_version >= \"3.11\""] grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] @@ -652,6 +792,7 @@ version = "2.39.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "google_auth-2.39.0-py2.py3-none-any.whl", hash = "sha256:0150b6711e97fb9f52fe599f55648950cc4540015565d8fbb31be2ad6e1548a2"}, {file = "google_auth-2.39.0.tar.gz", hash = "sha256:73222d43cdc35a3aeacbfdcaf73142a97839f10de930550d89ebfe1d0a00cde7"}, @@ -665,11 +806,11 @@ rsa = ">=3.1.4,<5" [package.extras] aiohttp = ["aiohttp (>=3.6.2,<4.0.0)", "requests (>=2.20.0,<3.0.0)"] enterprise-cert = ["cryptography", "pyopenssl"] -pyjwt = ["cryptography (<39.0.0)", "cryptography (>=38.0.3)", "pyjwt (>=2.0)"] -pyopenssl = ["cryptography (<39.0.0)", "cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +pyjwt = ["cryptography (<39.0.0) ; python_version < 
\"3.8\"", "cryptography (>=38.0.3)", "pyjwt (>=2.0)"] +pyopenssl = ["cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0)"] -testing = ["aiohttp (<3.10.0)", "aiohttp (>=3.6.2,<4.0.0)", "aioresponses", "cryptography (<39.0.0)", "cryptography (>=38.0.3)", "flask", "freezegun", "grpcio", "mock", "oauth2client", "packaging", "pyjwt (>=2.0)", "pyopenssl (<24.3.0)", "pyopenssl (>=20.0.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-localserver", "pyu2f (>=0.1.5)", "requests (>=2.20.0,<3.0.0)", "responses", "urllib3"] +testing = ["aiohttp (<3.10.0)", "aiohttp (>=3.6.2,<4.0.0)", "aioresponses", "cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "flask", "freezegun", "grpcio", "mock", "oauth2client", "packaging", "pyjwt (>=2.0)", "pyopenssl (<24.3.0)", "pyopenssl (>=20.0.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-localserver", "pyu2f (>=0.1.5)", "requests (>=2.20.0,<3.0.0)", "responses", "urllib3"] urllib3 = ["packaging", "urllib3"] [[package]] @@ -678,6 +819,7 @@ version = "1.70.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"}, {file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"}, @@ -695,6 +837,8 @@ version = "3.2.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.9" +groups = ["main"] +markers = "(platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\") and python_version <= \"3.13\"" 
files = [ {file = "greenlet-3.2.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:777c1281aa7c786738683e302db0f55eb4b0077c20f1dc53db8852ffaea0a6b0"}, {file = "greenlet-3.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3059c6f286b53ea4711745146ffe5a5c5ff801f62f6c56949446e0f6461f8157"}, @@ -763,6 +907,7 @@ version = "0.23.1" description = "The official Python library for the groq API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"}, {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"}, @@ -782,6 +927,7 @@ version = "1.71.0" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "grpcio-1.71.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:c200cb6f2393468142eb50ab19613229dcc7829b5ccee8b658a36005f6669fdd"}, {file = "grpcio-1.71.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b2266862c5ad664a380fbbcdbdb8289d71464c42a8c29053820ee78ba0119e5d"}, @@ -845,6 +991,7 @@ version = "1.62.3" description = "Status proto mapping for gRPC" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"}, {file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"}, @@ -861,6 +1008,7 @@ version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, {file = "h11-0.16.0.tar.gz", hash = 
"sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, @@ -872,6 +1020,7 @@ version = "1.0.9" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, @@ -893,6 +1042,7 @@ version = "0.6.4" description = "A collection of framework independent HTTP protocol utils." optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ {file = "httptools-0.6.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c73ce323711a6ffb0d247dcd5a550b8babf0f757e86a52558fe5b86d6fefcc0"}, {file = "httptools-0.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345c288418f0944a6fe67be8e6afa9262b18c7626c3ef3c28adc5eabc06a68da"}, @@ -948,6 +1098,7 @@ version = "0.27.2" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, @@ -961,7 +1112,7 @@ idna = "*" sniffio = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -973,6 +1124,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -987,6 +1139,7 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -998,6 +1151,7 @@ version = "5.13.2" description = "A Python utility / library to sort Python imports." optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, @@ -1012,6 +1166,7 @@ version = "0.9.0" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"}, {file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"}, @@ -1097,6 +1252,7 @@ version = "1.33" description = "Apply JSON-Patches (RFC 6902)" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +groups = ["main"] files = [ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, @@ -1111,6 +1267,7 @@ version = "3.0.0" description = "Identify specific nodes in a JSON document (RFC 6901)" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, @@ -1122,6 +1279,7 @@ version = "1.4.8" description = "A fast implementation of the Cassowary constraint solver" optional = false python-versions = ">=3.10" +groups = ["main"] files = [ {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db"}, {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b"}, @@ -1211,6 +1369,7 @@ version = "0.3.27" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain-0.3.27-py3-none-any.whl", hash = 
"sha256:7b20c4f338826acb148d885b20a73a16e410ede9ee4f19bb02011852d5f98798"}, {file = "langchain-0.3.27.tar.gz", hash = "sha256:aa6f1e6274ff055d0fd36254176770f356ed0a8994297d1df47df341953cec62"}, @@ -1250,6 +1409,7 @@ version = "0.3.5" description = "An integration package connecting AnthropicMessages and LangChain" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain_anthropic-0.3.5-py3-none-any.whl", hash = "sha256:bad34b02d7b4bdca9a9471bc391b01269fd8dc4600b83ca2a3e76925b7c27fe6"}, {file = "langchain_anthropic-0.3.5.tar.gz", hash = "sha256:2aa1673511056061680492871f386d68a8b62947e0eb1f15303ef10db16c8357"}, @@ -1267,6 +1427,7 @@ version = "0.3.72" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "langchain_core-0.3.72-py3-none-any.whl", hash = "sha256:9fa15d390600eb6b6544397a7aa84be9564939b6adf7a2b091179ea30405b240"}, {file = "langchain_core-0.3.72.tar.gz", hash = "sha256:4de3828909b3d7910c313242ab07b241294650f5cb6eac17738dd3638b1cd7de"}, @@ -1287,6 +1448,7 @@ version = "0.1.3" description = "An integration package connecting DeepSeek and LangChain" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain_deepseek-0.1.3-py3-none-any.whl", hash = "sha256:8588e826371b417fca65c02f4273b4061eb9815a7bfcd5eb05acaa40d603aa89"}, {file = "langchain_deepseek-0.1.3.tar.gz", hash = "sha256:89dd6aa120fb50dcfcd3d593626d34c1c40deefe4510710d0807fcc19481adf5"}, @@ -1296,12 +1458,30 @@ files = [ langchain-core = ">=0.3.47,<1.0.0" langchain-openai = ">=0.3.9,<1.0.0" +[[package]] +name = "langchain-gigachat" +version = "0.3.12" +description = "An integration package connecting GigaChat and LangChain" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "langchain_gigachat-0.3.12-py3-none-any.whl", hash = 
"sha256:e7ee809a7e9c2dc8d5339f50c8478cad2bb24829db923b641142b5c1d4d991fe"}, + {file = "langchain_gigachat-0.3.12.tar.gz", hash = "sha256:35312a041f56c344e234777bed0c3d5b6e90542d832afbfb225f2728a51a5af5"}, +] + +[package.dependencies] +gigachat = ">=0.1.41.post1,<0.2.0" +langchain-core = ">=0.3,<0.4" +types-requests = ">=2.32,<3.0" + [[package]] name = "langchain-google-genai" version = "2.1.3" description = "An integration package connecting Google's genai package and LangChain" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain_google_genai-2.1.3-py3-none-any.whl", hash = "sha256:adf222931ac7af543f4013751a9b7dbd9ed637fb4eb3e4e0cd7e1d5d7e066d36"}, {file = "langchain_google_genai-2.1.3.tar.gz", hash = "sha256:0d4e2abf01a7594a9420d3569cf2cd4239a01cc24c6698d3c2c92a072b9b7b4a"}, @@ -1319,6 +1499,7 @@ version = "0.2.3" description = "An integration package connecting Groq and LangChain" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain_groq-0.2.3-py3-none-any.whl", hash = "sha256:3572c812acc1478ab0670c48eb9a135c95f47631190da750e48408267462a12d"}, {file = "langchain_groq-0.2.3.tar.gz", hash = "sha256:f94810fe734c9402b36273ddc3509eaa67f12a7d06b666c6ca472ab0bfdf37b7"}, @@ -1334,6 +1515,7 @@ version = "0.3.6" description = "An integration package connecting Ollama and LangChain" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "langchain_ollama-0.3.6-py3-none-any.whl", hash = "sha256:b339bd3fcf913b8d606ad426ef39e7122695532507fcd85aa96271b3f33dc3df"}, {file = "langchain_ollama-0.3.6.tar.gz", hash = "sha256:4270c4b30b3f3d10850cb9a1183b8c77d616195e0d9717ac745ef7f7f6cc2b6e"}, @@ -1349,6 +1531,7 @@ version = "0.3.14" description = "An integration package connecting OpenAI and LangChain" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain_openai-0.3.14-py3-none-any.whl", hash = 
"sha256:b8e648d2d7678a5540818199d141ff727c6f1514294b3e1e999a95357c9d66a0"}, {file = "langchain_openai-0.3.14.tar.gz", hash = "sha256:0662db78620c2e5c3ccfc1c36dc959c0ddc80e6bdf7ef81632cbf4b2cc9b9461"}, @@ -1365,6 +1548,7 @@ version = "0.3.9" description = "LangChain text splitting utilities" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "langchain_text_splitters-0.3.9-py3-none-any.whl", hash = "sha256:cee0bb816211584ea79cc79927317c358543f40404bcfdd69e69ba3ccde54401"}, {file = "langchain_text_splitters-0.3.9.tar.gz", hash = "sha256:7cd1e5a3aaf609979583eeca2eb34177622570b8fa8f586a605c6b1c34e7ebdb"}, @@ -1379,6 +1563,7 @@ version = "0.2.56" description = "Building stateful, multi-actor applications with LLMs" optional = false python-versions = "<4.0,>=3.9.0" +groups = ["main"] files = [ {file = "langgraph-0.2.56-py3-none-any.whl", hash = "sha256:ad8a4b772e34dc0137e890bb6ced596a39a1e684af66250c1e7c8150dbe90e9c"}, {file = "langgraph-0.2.56.tar.gz", hash = "sha256:af10b1ffd10d52fd4072a73f154b8c2513c0b22e5bd5d20f4567dfeecab98d1e"}, @@ -1395,6 +1580,7 @@ version = "2.0.25" description = "Library with base interfaces for LangGraph checkpoint savers." 
optional = false python-versions = "<4.0.0,>=3.9.0" +groups = ["main"] files = [ {file = "langgraph_checkpoint-2.0.25-py3-none-any.whl", hash = "sha256:23416a0f5bc9dd712ac10918fc13e8c9c4530c419d2985a441df71a38fc81602"}, {file = "langgraph_checkpoint-2.0.25.tar.gz", hash = "sha256:77a63cab7b5f84dec1d49db561326ec28bdd48bcefb7fe4ac372069d2609287b"}, @@ -1410,6 +1596,7 @@ version = "0.1.63" description = "SDK for interacting with LangGraph API" optional = false python-versions = "<4.0.0,>=3.9.0" +groups = ["main"] files = [ {file = "langgraph_sdk-0.1.63-py3-none-any.whl", hash = "sha256:6fb78a7fc6a30eea43bd0d6401dbc9e3263d0d4c03f63c04035980da7e586b05"}, {file = "langgraph_sdk-0.1.63.tar.gz", hash = "sha256:62bf2cc31e5aa6c5b9011ee1702bcf1e36e67e142a60bd97af2611162fb58e18"}, @@ -1425,6 +1612,7 @@ version = "0.4.13" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "langsmith-0.4.13-py3-none-any.whl", hash = "sha256:dab7b16ee16986995007bf5a777f45c18f8bf7453f67ae2ebcb46ce43c214297"}, {file = "langsmith-0.4.13.tar.gz", hash = "sha256:1ae7dbb5d8150647406f49885a2dd16ab12bd990254b5dc23718838b3d086fde"}, @@ -1452,6 +1640,7 @@ version = "1.3.10" description = "A super-fast templating language that borrows the best ideas from the existing templating languages." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59"}, {file = "mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28"}, @@ -1471,6 +1660,7 @@ version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -1495,6 +1685,7 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -1565,6 +1756,7 @@ version = "3.10.1" description = "Python plotting package" optional = false python-versions = ">=3.10" +groups = ["main"] files = [ {file = "matplotlib-3.10.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ff2ae14910be903f4a24afdbb6d7d3a6c44da210fc7d42790b87aeac92238a16"}, {file = "matplotlib-3.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0721a3fd3d5756ed593220a8b86808a36c5031fce489adb5b31ee6dbb47dd5b2"}, @@ -1622,6 +1814,7 @@ version = "0.7.0" description = "McCabe checker, plugin for flake8" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -1633,17 +1826,30 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = 
"sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] +[[package]] +name = "multitasking" +version = "0.0.12" +description = "Non-blocking Python methods using decorators" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "multitasking-0.0.12.tar.gz", hash = "sha256:2fba2fa8ed8c4b85e227c5dd7dc41c7d658de3b6f247927316175a57349b84d1"}, +] + [[package]] name = "mypy-extensions" version = "1.1.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, @@ -1655,6 +1861,7 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -1700,6 +1907,7 @@ version = "0.5.2" description = "The official Python client for Ollama." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "ollama-0.5.2-py3-none-any.whl", hash = "sha256:48ee9aed1c8f4cf2e4237b6d4cc36c328f1abc40da4aa6edf52698f757bc4164"}, {file = "ollama-0.5.2.tar.gz", hash = "sha256:7a575a90416a816231f216dbd10c3480b107218a90388c061fdf20d7ab7fe990"}, @@ -1715,6 +1923,7 @@ version = "1.76.2" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"}, {file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"}, @@ -1741,6 +1950,7 @@ version = "3.10.17" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "orjson-3.10.17-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bc399cf138a0201d0bf2399b44195d33a0a5aee149dab114340da0d766c88b95"}, {file = "orjson-3.10.17-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59225b27b72e0e1626d869f7b987da6c74f9b6026cf9a87c1cdaf74ca9f7b8c0"}, @@ -1822,6 +2032,7 @@ version = "1.9.1" description = "Fast, correct Python msgpack library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "ormsgpack-1.9.1-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:f1f804fd9c0fd84213a6022c34172f82323b34afa7052a4af18797582cf56365"}, {file = "ormsgpack-1.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eab5cec99c46276b37071d570aab98603f3d0309b3818da3247eb64bb95e5cfc"}, @@ -1872,6 +2083,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" 
+groups = ["main", "dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -1883,6 +2095,7 @@ version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, @@ -1930,8 +2143,8 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.23.2", markers = "python_version == \"3.11\""}, {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -1968,17 +2181,30 @@ version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] +[[package]] +name = "peewee" +version = "3.18.2" +description = "a little orm" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "peewee-3.18.2.tar.gz", hash = "sha256:77a54263eb61aff2ea72f63d2eeb91b140c25c1884148e28e4c0f7c4f64996a0"}, +] + [[package]] name = "pillow" version = "11.2.1" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"}, {file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"}, @@ -2069,7 +2295,7 @@ fpx = ["olefile"] mic = ["olefile"] test-arrow = ["pyarrow"] tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] -typing = ["typing-extensions"] +typing = ["typing-extensions ; python_version < \"3.10\""] xmp = ["defusedxml"] [[package]] @@ -2078,6 +2304,7 @@ version = "4.3.7" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "platformdirs-4.3.7-py3-none-any.whl", hash = "sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94"}, {file = "platformdirs-4.3.7.tar.gz", hash = "sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351"}, @@ -2094,6 +2321,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -2109,6 +2337,7 @@ version = "3.0.51" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07"}, {file = "prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed"}, @@ -2123,6 +2352,7 @@ version = "1.26.1" description = "Beautiful, Pythonic protocol buffers" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66"}, {file = "proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012"}, @@ -2140,6 +2370,7 @@ version = "6.30.2" description = "" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "protobuf-6.30.2-cp310-abi3-win32.whl", hash = "sha256:b12ef7df7b9329886e66404bef5e9ce6a26b54069d7f7436a0853ccdeb91c103"}, {file = "protobuf-6.30.2-cp310-abi3-win_amd64.whl", hash = 
"sha256:7653c99774f73fe6b9301b87da52af0e69783a2e371e8b599b3e9cb4da4b12b9"}, @@ -2158,6 +2389,7 @@ version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, @@ -2169,6 +2401,7 @@ version = "0.4.2" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a"}, {file = "pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6"}, @@ -2183,6 +2416,7 @@ version = "2.11.1" description = "Python style guide checker" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, @@ -2194,6 +2428,7 @@ version = "2.22" description = "C parser in Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, @@ -2205,6 +2440,7 @@ version = "2.11.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pydantic-2.11.4-py3-none-any.whl", hash = 
"sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"}, {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"}, @@ -2218,7 +2454,7 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -2226,6 +2462,7 @@ version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, @@ -2337,6 +2574,7 @@ version = "3.1.0" description = "passive checker of Python programs" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pyflakes-3.1.0-py2.py3-none-any.whl", hash = "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774"}, {file = "pyflakes-3.1.0.tar.gz", hash = "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"}, @@ -2348,6 +2586,7 @@ version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, @@ -2362,6 +2601,7 @@ version = "3.2.3" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf"}, {file = "pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be"}, @@ -2376,6 +2616,7 @@ version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -2396,6 +2637,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -2410,6 +2652,7 @@ version = "1.0.0" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, {file = 
"python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, @@ -2424,6 +2667,7 @@ version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -2435,6 +2679,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -2497,6 +2742,7 @@ version = "2.1.0" description = "Python library to build pretty command line user prompts ⭐️" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "questionary-2.1.0-py3-none-any.whl", hash = "sha256:44174d237b68bc828e4878c763a9ad6790ee61990e0ae72927694ead57bab8ec"}, {file = "questionary-2.1.0.tar.gz", hash = "sha256:6302cdd645b19667d8f6e6634774e9538bfcd1aad9be287e743d96cacaf95587"}, @@ -2511,6 +2757,7 @@ version = "2024.11.6" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -2614,6 +2861,7 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -2635,6 +2883,7 @@ version = "1.0.0" description = "A utility belt for advanced users of python-requests" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main"] files = [ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, @@ -2649,6 +2898,7 @@ version = "13.9.4" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, @@ -2667,6 +2917,7 @@ version = "0.14.4" description = "Rich toolkit for building command-line applications" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "rich_toolkit-0.14.4-py3-none-any.whl", hash = "sha256:cc71ebee83eaa122d8e42882408bc5a4bf0240bbf1e368811ee56d249b3d742a"}, {file = "rich_toolkit-0.14.4.tar.gz", hash = "sha256:db256cf45165cae381c9bbf3b48a0fd4d99a07c80155cc655c80212a62e28fe1"}, @@ -2683,6 +2934,7 @@ version = "4.9.1" description = "Pure-Python RSA implementation" optional = false python-versions = "<4,>=3.6" +groups = ["main"] files = [ {file = "rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762"}, {file = 
"rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75"}, @@ -2697,6 +2949,7 @@ version = "1.5.4" description = "Tool to Detect Surrounding Shell" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, @@ -2708,6 +2961,7 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -2719,17 +2973,31 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] +[[package]] +name = "soupsieve" +version = "2.7" +description = "A modern CSS selector implementation for Beautiful Soup." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"}, + {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"}, +] + [[package]] name = "sqlalchemy" version = "2.0.40" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "SQLAlchemy-2.0.40-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ae9597cab738e7cc823f04a704fb754a9249f0b6695a6aeb63b74055cd417a96"}, {file = "SQLAlchemy-2.0.40-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a5c21ab099a83d669ebb251fddf8f5cee4d75ea40a5a1653d9c43d60e20867"}, @@ -2825,6 +3093,7 @@ version = "0.27.0" description = "The little ASGI library that shines." optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, @@ -2842,6 +3111,7 @@ version = "0.9.0" description = "Pretty-print tabular data" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, @@ -2856,6 +3126,7 @@ version = "8.5.0" description = "Retry code until it succeeds" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, {file = "tenacity-8.5.0.tar.gz", hash = 
"sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, @@ -2871,6 +3142,7 @@ version = "0.9.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"}, {file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"}, @@ -2918,6 +3190,7 @@ version = "4.67.1" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -2939,6 +3212,7 @@ version = "0.15.3" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "typer-0.15.3-py3-none-any.whl", hash = "sha256:c86a65ad77ca531f03de08d1b9cb67cd09ad02ddddf4b34745b5008f43b239bd"}, {file = "typer-0.15.3.tar.gz", hash = "sha256:818873625d0569653438316567861899f7e9972f2e6e0c16dab608345ced713c"}, @@ -2950,12 +3224,28 @@ rich = ">=10.11.0" shellingham = ">=1.3.0" typing-extensions = ">=3.7.4.3" +[[package]] +name = "types-requests" +version = "2.32.4.20250809" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "types_requests-2.32.4.20250809-py3-none-any.whl", hash = "sha256:f73d1832fb519ece02c85b1f09d5f0dd3108938e7d47e7f94bbfa18a6782b163"}, + {file = "types_requests-2.32.4.20250809.tar.gz", hash = "sha256:d8060de1c8ee599311f56ff58010fb4902f462a1470802cf9f6ed27bc46c4df3"}, +] + +[package.dependencies] +urllib3 = ">=2" + [[package]] name = "typing-extensions" version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, @@ -2967,6 +3257,7 @@ version = "0.4.0" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, @@ -2981,6 +3272,7 @@ version = "2025.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["main"] files = [ {file = 
"tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, @@ -2992,13 +3284,14 @@ version = "2.4.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -3009,6 +3302,7 @@ version = "0.34.2" description = "The lightning-fast ASGI server." 
optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, @@ -3021,12 +3315,12 @@ h11 = ">=0.8" httptools = {version = ">=0.6.3", optional = true, markers = "extra == \"standard\""} python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} -uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" and extra == \"standard\""} watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} [package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] [[package]] name = "uvloop" @@ -3034,6 +3328,8 @@ version = "0.21.0" description = "Fast implementation of asyncio event loop on top of libuv" optional = false python-versions = ">=3.8.0" +groups = ["main"] +markers = "sys_platform != 
\"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"" files = [ {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, @@ -3085,6 +3381,7 @@ version = "1.0.5" description = "Simple, modern and high performance file watching and code reload in python." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "watchfiles-1.0.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5c40fe7dd9e5f81e0847b1ea64e1f5dd79dd61afbedb57759df06767ac719b40"}, {file = "watchfiles-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c0db396e6003d99bb2d7232c957b5f0b5634bbd1b24e381a5afcc880f7373fb"}, @@ -3168,6 +3465,7 @@ version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, @@ -3179,6 +3477,7 @@ version = "15.0.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, @@ -3251,12 +3550,43 @@ files = [ {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, ] +[[package]] +name = "yfinance" 
+version = "0.2.65" +description = "Download market data from Yahoo! Finance API" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "yfinance-0.2.65-py2.py3-none-any.whl", hash = "sha256:7be13abb0d80a17230bf798e9c6a324fa2bef0846684a6d4f7fa2abd21938963"}, + {file = "yfinance-0.2.65.tar.gz", hash = "sha256:3d465e58c49be9d61f9862829de3e00bef6b623809f32f4efb5197b62fc60485"}, +] + +[package.dependencies] +beautifulsoup4 = ">=4.11.1" +curl_cffi = ">=0.7" +frozendict = ">=2.3.4" +multitasking = ">=0.0.7" +numpy = ">=1.16.5" +pandas = ">=1.3.0" +peewee = ">=3.16.2" +platformdirs = ">=2.0.0" +protobuf = ">=3.19.0" +pytz = ">=2022.5" +requests = ">=2.31" +websockets = ">=13.0" + +[package.extras] +nospam = ["requests_cache (>=1.0)", "requests_ratelimiter (>=0.3.1)"] +repair = ["scipy (>=1.6.3)"] + [[package]] name = "zstandard" version = "0.23.0" description = "Zstandard bindings for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9"}, {file = "zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880"}, @@ -3364,6 +3694,6 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\ cffi = ["cffi (>=1.11)"] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = "^3.11" -content-hash = "295166944ae8362bff38e4b409259aa7c0afa5ade47573dbeadba90071b81134" +content-hash = "3077ba9c1ae9cc37aef74c4d613bd5919a58f7710cdc916775f4f38e4f46f792" diff --git a/pyproject.toml b/pyproject.toml index c6a29c17a..a79e736bf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ httpx = "^0.27.0" sqlalchemy = "^2.0.22" alembic = "^1.12.0" langchain-gigachat = "^0.3.12" +yfinance = "^0.2.65" [tool.poetry.group.dev.dependencies] pytest = "^7.4.0" diff --git 
a/src/agents/aswath_damodaran.py b/src/agents/aswath_damodaran.py index a1efbd268..1e5758926 100644 --- a/src/agents/aswath_damodaran.py +++ b/src/agents/aswath_damodaran.py @@ -13,6 +13,7 @@ get_market_cap, search_line_items, ) +from src.data.providers import get_data_provider_for_agent from src.utils.api_key import get_api_key_from_state from src.utils.llm import call_llm from src.utils.progress import progress @@ -37,6 +38,8 @@ def aswath_damodaran_agent(state: AgentState, agent_id: str = "aswath_damodaran_ end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) analysis_data: dict[str, dict] = {} damodaran_signals: dict[str, dict] = {} @@ -44,7 +47,7 @@ def aswath_damodaran_agent(state: AgentState, agent_id: str = "aswath_damodaran_ for ticker in tickers: # ─── Fetch core data ──────────────────────────────────────────────────── progress.update_status(agent_id, ticker, "Fetching financial metrics") - metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=5, api_key=api_key) + metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=5, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching financial line items") line_items = search_line_items( @@ -61,10 +64,11 @@ def aswath_damodaran_agent(state: AgentState, agent_id: str = "aswath_damodaran_ ], end_date, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) # ─── Analyses ─────────────────────────────────────────────────────────── progress.update_status(agent_id, ticker, "Analyzing growth and reinvestment") diff --git a/src/agents/ben_graham.py 
b/src/agents/ben_graham.py index 807175abb..1eeb06bc4 100644 --- a/src/agents/ben_graham.py +++ b/src/agents/ben_graham.py @@ -1,5 +1,6 @@ from src.graph.state import AgentState, show_agent_reasoning from src.tools.api import get_financial_metrics, get_market_cap, search_line_items +from src.data.providers import get_data_provider_for_agent from langchain_core.prompts import ChatPromptTemplate from langchain_core.messages import HumanMessage from pydantic import BaseModel @@ -29,19 +30,21 @@ def ben_graham_agent(state: AgentState, agent_id: str = "ben_graham_agent"): end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) analysis_data = {} graham_analysis = {} for ticker in tickers: progress.update_status(agent_id, ticker, "Fetching financial metrics") - metrics = get_financial_metrics(ticker, end_date, period="annual", limit=10, api_key=api_key) + metrics = get_financial_metrics(ticker, end_date, period="annual", limit=10, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Gathering financial line items") - financial_line_items = search_line_items(ticker, ["earnings_per_share", "revenue", "net_income", "book_value_per_share", "total_assets", "total_liabilities", "current_assets", "current_liabilities", "dividends_and_other_cash_distributions", "outstanding_shares"], end_date, period="annual", limit=10, api_key=api_key) + financial_line_items = search_line_items(ticker, ["earnings_per_share", "revenue", "net_income", "book_value_per_share", "total_assets", "total_liabilities", "current_assets", "current_liabilities", "dividends_and_other_cash_distributions", "outstanding_shares"], end_date, period="annual", limit=10, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = 
get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) # Perform sub-analyses progress.update_status(agent_id, ticker, "Analyzing earnings stability") @@ -109,7 +112,7 @@ def analyze_earnings_stability(metrics: list, financial_line_items: list) -> dic eps_vals = [] for item in financial_line_items: - if item.earnings_per_share is not None: + if hasattr(item, "earnings_per_share") and item.earnings_per_share is not None: eps_vals.append(item.earnings_per_share) if len(eps_vals) < 2: @@ -149,11 +152,10 @@ def analyze_financial_strength(financial_line_items: list) -> dict: if not financial_line_items: return {"score": score, "details": "No data for financial strength analysis"} - latest_item = financial_line_items[0] - total_assets = latest_item.total_assets or 0 - total_liabilities = latest_item.total_liabilities or 0 - current_assets = latest_item.current_assets or 0 - current_liabilities = latest_item.current_liabilities or 0 + total_assets = get_line_item_value(financial_line_items, "total_assets") + total_liabilities = get_line_item_value(financial_line_items, "total_liabilities") + current_assets = get_line_item_value(financial_line_items, "current_assets") + current_liabilities = get_line_item_value(financial_line_items, "current_liabilities") # 1. Current ratio if current_liabilities > 0: @@ -184,7 +186,7 @@ def analyze_financial_strength(financial_line_items: list) -> dict: details.append("Cannot compute debt ratio (missing total_assets).") # 3. 
Dividend track record - div_periods = [item.dividends_and_other_cash_distributions for item in financial_line_items if item.dividends_and_other_cash_distributions is not None] + div_periods = [item.dividends_and_other_cash_distributions for item in financial_line_items if hasattr(item, "dividends_and_other_cash_distributions") and item.dividends_and_other_cash_distributions is not None] if div_periods: # In many data feeds, dividend outflow is shown as a negative number # (money going out to shareholders). We'll consider any negative as 'paid a dividend'. @@ -204,6 +206,15 @@ def analyze_financial_strength(financial_line_items: list) -> dict: return {"score": score, "details": "; ".join(details)} +def get_line_item_value(financial_line_items: list, line_item_name: str) -> float: + """Helper function to extract value for a specific line item.""" + for item in financial_line_items: + if hasattr(item, line_item_name): + value = getattr(item, line_item_name) + if value is not None: + return value + return 0 + def analyze_valuation_graham(financial_line_items: list, market_cap: float) -> dict: """ Core Graham approach to valuation: @@ -214,12 +225,11 @@ def analyze_valuation_graham(financial_line_items: list, market_cap: float) -> d if not financial_line_items or not market_cap or market_cap <= 0: return {"score": 0, "details": "Insufficient data to perform valuation"} - latest = financial_line_items[0] - current_assets = latest.current_assets or 0 - total_liabilities = latest.total_liabilities or 0 - book_value_ps = latest.book_value_per_share or 0 - eps = latest.earnings_per_share or 0 - shares_outstanding = latest.outstanding_shares or 0 + current_assets = get_line_item_value(financial_line_items, "current_assets") + total_liabilities = get_line_item_value(financial_line_items, "total_liabilities") + book_value_ps = get_line_item_value(financial_line_items, "book_value_per_share") + eps = get_line_item_value(financial_line_items, "earnings_per_share") + 
shares_outstanding = get_line_item_value(financial_line_items, "outstanding_shares") details = [] score = 0 diff --git a/src/agents/bill_ackman.py b/src/agents/bill_ackman.py index cc7aa27a7..d260d0e06 100644 --- a/src/agents/bill_ackman.py +++ b/src/agents/bill_ackman.py @@ -1,5 +1,6 @@ from src.graph.state import AgentState, show_agent_reasoning from src.tools.api import get_financial_metrics, get_market_cap, search_line_items +from src.data.providers import get_data_provider_for_agent from langchain_core.prompts import ChatPromptTemplate from langchain_core.messages import HumanMessage from pydantic import BaseModel @@ -26,12 +27,14 @@ def bill_ackman_agent(state: AgentState, agent_id: str = "bill_ackman_agent"): end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) analysis_data = {} ackman_analysis = {} for ticker in tickers: progress.update_status(agent_id, ticker, "Fetching financial metrics") - metrics = get_financial_metrics(ticker, end_date, period="annual", limit=5, api_key=api_key) + metrics = get_financial_metrics(ticker, end_date, period="annual", limit=5, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Gathering financial line items") # Request multiple periods of data (annual or TTM) for a more robust long-term view. 
@@ -53,10 +56,11 @@ def bill_ackman_agent(state: AgentState, agent_id: str = "bill_ackman_agent"): period="annual", limit=5, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Analyzing business quality") quality_analysis = analyze_business_quality(metrics, financial_line_items) @@ -150,7 +154,7 @@ def analyze_business_quality(metrics: list, financial_line_items: list) -> dict: } # 1. Multi-period revenue growth analysis - revenues = [item.revenue for item in financial_line_items if item.revenue is not None] + revenues = [getattr(item, 'revenue', None) for item in financial_line_items if getattr(item, 'revenue', None) is not None] if len(revenues) >= 2: initial, final = revenues[-1], revenues[0] if initial and final and final > initial: @@ -167,8 +171,8 @@ def analyze_business_quality(metrics: list, financial_line_items: list) -> dict: details.append("Not enough revenue data for multi-period trend.") # 2. Operating margin and free cash flow consistency - fcf_vals = [item.free_cash_flow for item in financial_line_items if item.free_cash_flow is not None] - op_margin_vals = [item.operating_margin for item in financial_line_items if item.operating_margin is not None] + fcf_vals = [getattr(item, 'free_cash_flow', None) for item in financial_line_items if getattr(item, 'free_cash_flow', None) is not None] + op_margin_vals = [getattr(item, 'operating_margin', None) for item in financial_line_items if getattr(item, 'operating_margin', None) is not None] if op_margin_vals: above_15 = sum(1 for m in op_margin_vals if m > 0.15) @@ -228,7 +232,7 @@ def analyze_financial_discipline(metrics: list, financial_line_items: list) -> d } # 1. 
Multi-period debt ratio or debt_to_equity - debt_to_equity_vals = [item.debt_to_equity for item in financial_line_items if item.debt_to_equity is not None] + debt_to_equity_vals = [getattr(item, 'debt_to_equity', None) for item in financial_line_items if getattr(item, 'debt_to_equity', None) is not None] if debt_to_equity_vals: below_one_count = sum(1 for d in debt_to_equity_vals if d < 1.0) if below_one_count >= (len(debt_to_equity_vals) // 2 + 1): @@ -255,9 +259,9 @@ def analyze_financial_discipline(metrics: list, financial_line_items: list) -> d # 2. Capital allocation approach (dividends + share counts) dividends_list = [ - item.dividends_and_other_cash_distributions + getattr(item, 'dividends_and_other_cash_distributions', None) for item in financial_line_items - if item.dividends_and_other_cash_distributions is not None + if getattr(item, 'dividends_and_other_cash_distributions', None) is not None ] if dividends_list: paying_dividends_count = sum(1 for d in dividends_list if d < 0) @@ -270,7 +274,7 @@ def analyze_financial_discipline(metrics: list, financial_line_items: list) -> d details.append("No dividend data found across periods.") # Check for decreasing share count (simple approach) - shares = [item.outstanding_shares for item in financial_line_items if item.outstanding_shares is not None] + shares = [getattr(item, 'outstanding_shares', None) for item in financial_line_items if getattr(item, 'outstanding_shares', None) is not None] if len(shares) >= 2: # For buybacks, the newest count should be less than the oldest count if shares[0] < shares[-1]: @@ -303,8 +307,8 @@ def analyze_activism_potential(financial_line_items: list) -> dict: } # Check revenue growth vs. 
operating margin - revenues = [item.revenue for item in financial_line_items if item.revenue is not None] - op_margins = [item.operating_margin for item in financial_line_items if item.operating_margin is not None] + revenues = [getattr(item, 'revenue', None) for item in financial_line_items if getattr(item, 'revenue', None) is not None] + op_margins = [getattr(item, 'operating_margin', None) for item in financial_line_items if getattr(item, 'operating_margin', None) is not None] if len(revenues) < 2 or not op_margins: return { diff --git a/src/agents/cathie_wood.py b/src/agents/cathie_wood.py index 1e6913900..5f3be50d8 100644 --- a/src/agents/cathie_wood.py +++ b/src/agents/cathie_wood.py @@ -1,5 +1,6 @@ from src.graph.state import AgentState, show_agent_reasoning from src.tools.api import get_financial_metrics, get_market_cap, search_line_items +from src.data.providers import get_data_provider_for_agent from langchain_core.prompts import ChatPromptTemplate from langchain_core.messages import HumanMessage from pydantic import BaseModel @@ -28,12 +29,14 @@ def cathie_wood_agent(state: AgentState, agent_id: str = "cathie_wood_agent"): end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) analysis_data = {} cw_analysis = {} for ticker in tickers: progress.update_status(agent_id, ticker, "Fetching financial metrics") - metrics = get_financial_metrics(ticker, end_date, period="annual", limit=5, api_key=api_key) + metrics = get_financial_metrics(ticker, end_date, period="annual", limit=5, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Gathering financial line items") # Request multiple periods of data (annual or TTM) for a more robust view. 
@@ -57,10 +60,11 @@ def cathie_wood_agent(state: AgentState, agent_id: str = "cathie_wood_agent"): period="annual", limit=5, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Analyzing disruptive potential") disruptive_analysis = analyze_disruptive_potential(metrics, financial_line_items) @@ -125,7 +129,7 @@ def analyze_disruptive_potential(metrics: list, financial_line_items: list) -> d return {"score": 0, "details": "Insufficient data to analyze disruptive potential"} # 1. Revenue Growth Analysis - Check for accelerating growth - revenues = [item.revenue for item in financial_line_items if item.revenue] + revenues = [getattr(item, 'revenue', None) for item in financial_line_items if getattr(item, 'revenue', None)] if len(revenues) >= 3: # Need at least 3 periods to check acceleration growth_rates = [] for i in range(len(revenues) - 1): @@ -171,7 +175,7 @@ def analyze_disruptive_potential(metrics: list, financial_line_items: list) -> d details.append("Insufficient gross margin data") # 3. Operating Leverage Analysis - revenues = [item.revenue for item in financial_line_items if item.revenue] + revenues = [getattr(item, 'revenue', None) for item in financial_line_items if getattr(item, 'revenue', None)] operating_expenses = [item.operating_expense for item in financial_line_items if hasattr(item, "operating_expense") and item.operating_expense] if len(revenues) >= 2 and len(operating_expenses) >= 2: @@ -185,7 +189,7 @@ def analyze_disruptive_potential(metrics: list, financial_line_items: list) -> d details.append("Insufficient data for operating leverage analysis") # 4. 
R&D Investment Analysis - rd_expenses = [item.research_and_development for item in financial_line_items if hasattr(item, "research_and_development") and item.research_and_development is not None] + rd_expenses = [getattr(item, 'research_and_development', None) for item in financial_line_items if getattr(item, 'research_and_development', None) is not None] if rd_expenses and revenues: rd_intensity = rd_expenses[0] / revenues[0] if rd_intensity > 0.15: # High R&D intensity @@ -224,8 +228,8 @@ def analyze_innovation_growth(metrics: list, financial_line_items: list) -> dict return {"score": 0, "details": "Insufficient data to analyze innovation-driven growth"} # 1. R&D Investment Trends - rd_expenses = [item.research_and_development for item in financial_line_items if hasattr(item, "research_and_development") and item.research_and_development] - revenues = [item.revenue for item in financial_line_items if item.revenue] + rd_expenses = [getattr(item, 'research_and_development', None) for item in financial_line_items if getattr(item, 'research_and_development', None)] + revenues = [getattr(item, 'revenue', None) for item in financial_line_items if getattr(item, 'revenue', None)] if rd_expenses and revenues and len(rd_expenses) >= 2: rd_growth = (rd_expenses[0] - rd_expenses[-1]) / abs(rd_expenses[-1]) if rd_expenses[-1] != 0 else 0 @@ -246,7 +250,7 @@ def analyze_innovation_growth(metrics: list, financial_line_items: list) -> dict details.append("Insufficient R&D data for trend analysis") # 2. 
Free Cash Flow Analysis - fcf_vals = [item.free_cash_flow for item in financial_line_items if item.free_cash_flow] + fcf_vals = [getattr(item, 'free_cash_flow', None) for item in financial_line_items if getattr(item, 'free_cash_flow', None)] if fcf_vals and len(fcf_vals) >= 2: fcf_growth = (fcf_vals[0] - fcf_vals[-1]) / abs(fcf_vals[-1]) positive_fcf_count = sum(1 for f in fcf_vals if f > 0) @@ -264,7 +268,7 @@ def analyze_innovation_growth(metrics: list, financial_line_items: list) -> dict details.append("Insufficient FCF data for analysis") # 3. Operating Efficiency Analysis - op_margin_vals = [item.operating_margin for item in financial_line_items if item.operating_margin] + op_margin_vals = [getattr(item, 'operating_margin', None) for item in financial_line_items if getattr(item, 'operating_margin', None)] if op_margin_vals and len(op_margin_vals) >= 2: margin_trend = op_margin_vals[0] - op_margin_vals[-1] diff --git a/src/agents/charlie_munger.py b/src/agents/charlie_munger.py index 444f9cb56..fcaf88d53 100644 --- a/src/agents/charlie_munger.py +++ b/src/agents/charlie_munger.py @@ -1,5 +1,6 @@ from src.graph.state import AgentState, show_agent_reasoning from src.tools.api import get_financial_metrics, get_market_cap, search_line_items, get_insider_trades, get_company_news +from src.data.providers import get_data_provider_for_agent from langchain_core.prompts import ChatPromptTemplate from langchain_core.messages import HumanMessage from pydantic import BaseModel @@ -24,12 +25,14 @@ def charlie_munger_agent(state: AgentState, agent_id: str = "charlie_munger_agen end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) analysis_data = {} munger_analysis = {} for ticker in tickers: progress.update_status(agent_id, ticker, "Fetching financial metrics") - metrics = 
get_financial_metrics(ticker, end_date, period="annual", limit=10, api_key=api_key) # Munger looks at longer periods + metrics = get_financial_metrics(ticker, end_date, period="annual", limit=10, api_key=api_key, data_provider=data_provider) # Munger looks at longer periods progress.update_status(agent_id, ticker, "Gathering financial line items") financial_line_items = search_line_items( @@ -54,10 +57,11 @@ def charlie_munger_agent(state: AgentState, agent_id: str = "charlie_munger_agen period="annual", limit=10, # Munger examines long-term trends api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching insider trades") # Munger values management with skin in the game @@ -68,6 +72,7 @@ def charlie_munger_agent(state: AgentState, agent_id: str = "charlie_munger_agen start_date=None, limit=100, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Fetching company news") @@ -79,6 +84,7 @@ def charlie_munger_agent(state: AgentState, agent_id: str = "charlie_munger_agen start_date=None, limit=100, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Analyzing moat strength") @@ -243,10 +249,10 @@ def analyze_moat_strength(metrics: list, financial_line_items: list) -> dict: details.append("Insufficient data for capital intensity analysis") # 4. 
Intangible assets - Munger values R&D and intellectual property - r_and_d = [item.research_and_development for item in financial_line_items - if hasattr(item, 'research_and_development') and item.research_and_development is not None] + r_and_d = [getattr(item, 'research_and_development', None) for item in financial_line_items + if getattr(item, 'research_and_development', None) is not None] - goodwill_and_intangible_assets = [item.goodwill_and_intangible_assets for item in financial_line_items + goodwill_and_intangible_assets = [getattr(item, 'goodwill_and_intangible_assets', None) for item in financial_line_items if hasattr(item, 'goodwill_and_intangible_assets') and item.goodwill_and_intangible_assets is not None] if r_and_d and len(r_and_d) > 0: @@ -287,10 +293,10 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list) # 1. Capital allocation - Check FCF to net income ratio # Munger values companies that convert earnings to cash - fcf_values = [item.free_cash_flow for item in financial_line_items + fcf_values = [getattr(item, 'free_cash_flow', None) for item in financial_line_items if hasattr(item, 'free_cash_flow') and item.free_cash_flow is not None] - net_income_values = [item.net_income for item in financial_line_items + net_income_values = [getattr(item, 'net_income', None) for item in financial_line_items if hasattr(item, 'net_income') and item.net_income is not None] if fcf_values and net_income_values and len(fcf_values) == len(net_income_values): @@ -319,10 +325,10 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list) details.append("Missing FCF or Net Income data") # 2. 
Debt management - Munger is cautious about debt - debt_values = [item.total_debt for item in financial_line_items + debt_values = [getattr(item, 'total_debt', None) for item in financial_line_items if hasattr(item, 'total_debt') and item.total_debt is not None] - equity_values = [item.shareholders_equity for item in financial_line_items + equity_values = [getattr(item, 'shareholders_equity', None) for item in financial_line_items if hasattr(item, 'shareholders_equity') and item.shareholders_equity is not None] if debt_values and equity_values and len(debt_values) == len(equity_values): @@ -344,9 +350,9 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list) details.append("Missing debt or equity data") # 3. Cash management efficiency - Munger values appropriate cash levels - cash_values = [item.cash_and_equivalents for item in financial_line_items + cash_values = [getattr(item, 'cash_and_equivalents', None) for item in financial_line_items if hasattr(item, 'cash_and_equivalents') and item.cash_and_equivalents is not None] - revenue_values = [item.revenue for item in financial_line_items + revenue_values = [getattr(item, 'revenue', None) for item in financial_line_items if hasattr(item, 'revenue') and item.revenue is not None] if cash_values and revenue_values and len(cash_values) > 0 and len(revenue_values) > 0: @@ -399,7 +405,7 @@ def analyze_management_quality(financial_line_items: list, insider_trades: list) details.append("No insider trading data available") # 5. Consistency in share count - Munger prefers stable/decreasing shares - share_counts = [item.outstanding_shares for item in financial_line_items + share_counts = [getattr(item, 'outstanding_shares', None) for item in financial_line_items if hasattr(item, 'outstanding_shares') and item.outstanding_shares is not None] if share_counts and len(share_counts) >= 3: @@ -442,7 +448,7 @@ def analyze_predictability(financial_line_items: list) -> dict: } # 1. 
Revenue stability and growth - revenues = [item.revenue for item in financial_line_items + revenues = [getattr(item, 'revenue', None) for item in financial_line_items if hasattr(item, 'revenue') and item.revenue is not None] if revenues and len(revenues) >= 5: @@ -477,7 +483,7 @@ def analyze_predictability(financial_line_items: list) -> dict: details.append("Insufficient revenue history for predictability analysis") # 2. Operating income stability - op_income = [item.operating_income for item in financial_line_items + op_income = [getattr(item, 'operating_income', None) for item in financial_line_items if hasattr(item, 'operating_income') and item.operating_income is not None] if op_income and len(op_income) >= 5: @@ -502,7 +508,7 @@ def analyze_predictability(financial_line_items: list) -> dict: details.append("Insufficient operating income history") # 3. Margin consistency - Munger values stable margins - op_margins = [item.operating_margin for item in financial_line_items + op_margins = [getattr(item, 'operating_margin', None) for item in financial_line_items if hasattr(item, 'operating_margin') and item.operating_margin is not None] if op_margins and len(op_margins) >= 5: @@ -522,7 +528,7 @@ def analyze_predictability(financial_line_items: list) -> dict: details.append("Insufficient margin history") # 4. 
Cash generation reliability - fcf_values = [item.free_cash_flow for item in financial_line_items + fcf_values = [getattr(item, 'free_cash_flow', None) for item in financial_line_items if hasattr(item, 'free_cash_flow') and item.free_cash_flow is not None] if fcf_values and len(fcf_values) >= 5: @@ -569,7 +575,7 @@ def calculate_munger_valuation(financial_line_items: list, market_cap: float) -> } # Get FCF values (Munger's preferred "owner earnings" metric) - fcf_values = [item.free_cash_flow for item in financial_line_items + fcf_values = [getattr(item, 'free_cash_flow', None) for item in financial_line_items if hasattr(item, 'free_cash_flow') and item.free_cash_flow is not None] if not fcf_values or len(fcf_values) < 3: diff --git a/src/agents/fundamentals.py b/src/agents/fundamentals.py index ab658eb44..159f44b5d 100644 --- a/src/agents/fundamentals.py +++ b/src/agents/fundamentals.py @@ -5,6 +5,7 @@ import json from src.tools.api import get_financial_metrics +from src.data.providers import get_data_provider_for_agent ##### Fundamental Agent ##### @@ -14,6 +15,8 @@ def fundamentals_analyst_agent(state: AgentState, agent_id: str = "fundamentals_ end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) # Initialize fundamental analysis for each ticker fundamental_analysis = {} @@ -27,6 +30,7 @@ def fundamentals_analyst_agent(state: AgentState, agent_id: str = "fundamentals_ period="ttm", limit=10, api_key=api_key, + data_provider=data_provider, ) if not financial_metrics: diff --git a/src/agents/michael_burry.py b/src/agents/michael_burry.py index 80f6e6953..0622038d5 100644 --- a/src/agents/michael_burry.py +++ b/src/agents/michael_burry.py @@ -16,6 +16,7 @@ get_market_cap, search_line_items, ) +from src.data.providers import get_data_provider_for_agent from src.utils.llm import call_llm 
from src.utils.progress import progress from src.utils.api_key import get_api_key_from_state @@ -32,6 +33,8 @@ class MichaelBurrySignal(BaseModel): def michael_burry_agent(state: AgentState, agent_id: str = "michael_burry_agent"): """Analyse stocks using Michael Burry's deep‑value, contrarian framework.""" api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) data = state["data"] end_date: str = data["end_date"] # YYYY‑MM‑DD tickers: list[str] = data["tickers"] @@ -47,7 +50,7 @@ def michael_burry_agent(state: AgentState, agent_id: str = "michael_burry_agent" # Fetch raw data # ------------------------------------------------------------------ progress.update_status(agent_id, ticker, "Fetching financial metrics") - metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=5, api_key=api_key) + metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=5, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching line items") line_items = search_line_items( @@ -64,16 +67,17 @@ def michael_burry_agent(state: AgentState, agent_id: str = "michael_burry_agent" ], end_date, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Fetching insider trades") - insider_trades = get_insider_trades(ticker, end_date=end_date, start_date=start_date) + insider_trades = get_insider_trades(ticker, end_date=end_date, start_date=start_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching company news") - news = get_company_news(ticker, end_date=end_date, start_date=start_date, limit=250) + news = get_company_news(ticker, end_date=end_date, start_date=start_date, limit=250, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching market cap") - market_cap = get_market_cap(ticker, 
end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) # ------------------------------------------------------------------ # Run sub‑analyses diff --git a/src/agents/peter_lynch.py b/src/agents/peter_lynch.py index e936a650a..ffe896c08 100644 --- a/src/agents/peter_lynch.py +++ b/src/agents/peter_lynch.py @@ -5,6 +5,7 @@ get_insider_trades, get_company_news, ) +from src.data.providers import get_data_provider_for_agent from langchain_core.prompts import ChatPromptTemplate from langchain_core.messages import HumanMessage from pydantic import BaseModel @@ -43,6 +44,8 @@ def peter_lynch_agent(state: AgentState, agent_id: str = "peter_lynch_agent"): end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) analysis_data = {} lynch_analysis = {} @@ -69,16 +72,17 @@ def peter_lynch_agent(state: AgentState, agent_id: str = "peter_lynch_agent"): period="annual", limit=5, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching insider trades") - insider_trades = get_insider_trades(ticker, end_date, limit=50, api_key=api_key) + insider_trades = get_insider_trades(ticker, end_date, limit=50, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching company news") - company_news = get_company_news(ticker, end_date, limit=50, api_key=api_key) + company_news = get_company_news(ticker, end_date, limit=50, api_key=api_key, data_provider=data_provider) # Perform sub-analyses: progress.update_status(agent_id, ticker, "Analyzing growth") diff 
--git a/src/agents/phil_fisher.py b/src/agents/phil_fisher.py index 7dba42916..fe511b5c4 100644 --- a/src/agents/phil_fisher.py +++ b/src/agents/phil_fisher.py @@ -5,6 +5,7 @@ get_insider_trades, get_company_news, ) +from src.data.providers import get_data_provider_for_agent from langchain_core.prompts import ChatPromptTemplate from langchain_core.messages import HumanMessage from pydantic import BaseModel @@ -37,6 +38,8 @@ def phil_fisher_agent(state: AgentState, agent_id: str = "phil_fisher_agent"): end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) analysis_data = {} fisher_analysis = {} @@ -68,16 +71,17 @@ def phil_fisher_agent(state: AgentState, agent_id: str = "phil_fisher_agent"): period="annual", limit=5, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching insider trades") - insider_trades = get_insider_trades(ticker, end_date, limit=50, api_key=api_key) + insider_trades = get_insider_trades(ticker, end_date, limit=50, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching company news") - company_news = get_company_news(ticker, end_date, limit=50, api_key=api_key) + company_news = get_company_news(ticker, end_date, limit=50, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Analyzing growth & quality") growth_quality = analyze_fisher_growth_quality(financial_line_items) @@ -228,7 +232,7 @@ def analyze_fisher_growth_quality(financial_line_items: list) -> dict: details.append("Not enough EPS data points for growth calculation.") # 3. 
R&D as % of Revenue (if we have R&D data) - rnd_values = [fi.research_and_development for fi in financial_line_items if fi.research_and_development is not None] + rnd_values = [getattr(fi, 'research_and_development', None) for fi in financial_line_items if getattr(fi, 'research_and_development', None) is not None] if rnd_values and revenues and len(rnd_values) == len(revenues): # We'll just look at the most recent for a simple measure recent_rnd = rnd_values[0] diff --git a/src/agents/rakesh_jhunjhunwala.py b/src/agents/rakesh_jhunjhunwala.py index d73b28191..b43c7d82d 100644 --- a/src/agents/rakesh_jhunjhunwala.py +++ b/src/agents/rakesh_jhunjhunwala.py @@ -5,6 +5,7 @@ import json from typing_extensions import Literal from src.tools.api import get_financial_metrics, get_market_cap, search_line_items +from src.data.providers import get_data_provider_for_agent from src.utils.llm import call_llm from src.utils.progress import progress from src.utils.api_key import get_api_key_from_state @@ -20,6 +21,8 @@ def rakesh_jhunjhunwala_agent(state: AgentState, agent_id: str = "rakesh_jhunjhu end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) # Collect all analysis for LLM reasoning analysis_data = {} jhunjhunwala_analysis = {} @@ -28,7 +31,7 @@ def rakesh_jhunjhunwala_agent(state: AgentState, agent_id: str = "rakesh_jhunjhu # Core Data progress.update_status(agent_id, ticker, "Fetching financial metrics") - metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=5, api_key=api_key) + metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=5, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching financial line items") financial_line_items = search_line_items( @@ -50,10 +53,11 @@ def rakesh_jhunjhunwala_agent(state: 
AgentState, agent_id: str = "rakesh_jhunjhu ], end_date, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) # ─── Analyses ─────────────────────────────────────────────────────────── progress.update_status(agent_id, ticker, "Analyzing growth") diff --git a/src/agents/risk_manager.py b/src/agents/risk_manager.py index aaa16718a..a913cf0a9 100644 --- a/src/agents/risk_manager.py +++ b/src/agents/risk_manager.py @@ -2,6 +2,7 @@ from src.graph.state import AgentState, show_agent_reasoning from src.utils.progress import progress from src.tools.api import get_prices, prices_to_df +from src.data.providers import get_data_provider_for_agent import json import numpy as np import pandas as pd @@ -14,6 +15,8 @@ def risk_management_agent(state: AgentState, agent_id: str = "risk_management_ag data = state["data"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) # Initialize risk analysis for each ticker risk_analysis = {} @@ -31,6 +34,7 @@ def risk_management_agent(state: AgentState, agent_id: str = "risk_management_ag start_date=data["start_date"], end_date=data["end_date"], api_key=api_key, + data_provider=data_provider, ) if not prices: diff --git a/src/agents/sentiment.py b/src/agents/sentiment.py index c5a6165d3..61d2c6560 100644 --- a/src/agents/sentiment.py +++ b/src/agents/sentiment.py @@ -6,6 +6,7 @@ import json from src.utils.api_key import get_api_key_from_state from src.tools.api import get_insider_trades, get_company_news +from src.data.providers import get_data_provider_for_agent ##### Sentiment Agent ##### @@ -15,6 +16,8 @@ def sentiment_analyst_agent(state: AgentState, agent_id: str = 
"sentiment_analys end_date = data.get("end_date") tickers = data.get("tickers") api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) # Initialize sentiment analysis for each ticker sentiment_analysis = {} @@ -27,6 +30,7 @@ def sentiment_analyst_agent(state: AgentState, agent_id: str = "sentiment_analys end_date=end_date, limit=1000, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Analyzing trading patterns") @@ -38,7 +42,7 @@ def sentiment_analyst_agent(state: AgentState, agent_id: str = "sentiment_analys progress.update_status(agent_id, ticker, "Fetching company news") # Get the company news - company_news = get_company_news(ticker, end_date, limit=100, api_key=api_key) + company_news = get_company_news(ticker, end_date, limit=100, api_key=api_key, data_provider=data_provider) # Get the sentiment from the company news sentiment = pd.Series([n.sentiment for n in company_news]).dropna() diff --git a/src/agents/stanley_druckenmiller.py b/src/agents/stanley_druckenmiller.py index 073ce1d76..8121895fb 100644 --- a/src/agents/stanley_druckenmiller.py +++ b/src/agents/stanley_druckenmiller.py @@ -7,6 +7,7 @@ get_company_news, get_prices, ) +from src.data.providers import get_data_provider_for_agent from langchain_core.prompts import ChatPromptTemplate from langchain_core.messages import HumanMessage from pydantic import BaseModel @@ -38,12 +39,14 @@ def stanley_druckenmiller_agent(state: AgentState, agent_id: str = "stanley_druc end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) analysis_data = {} druck_analysis = {} for ticker in tickers: progress.update_status(agent_id, ticker, "Fetching financial metrics") - metrics = 
get_financial_metrics(ticker, end_date, period="annual", limit=5, api_key=api_key) + metrics = get_financial_metrics(ticker, end_date, period="annual", limit=5, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Gathering financial line items") # Include relevant line items for Stan Druckenmiller's approach: @@ -73,19 +76,20 @@ def stanley_druckenmiller_agent(state: AgentState, agent_id: str = "stanley_druc period="annual", limit=5, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching insider trades") - insider_trades = get_insider_trades(ticker, end_date, limit=50, api_key=api_key) + insider_trades = get_insider_trades(ticker, end_date, limit=50, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching company news") - company_news = get_company_news(ticker, end_date, limit=50, api_key=api_key) + company_news = get_company_news(ticker, end_date, limit=50, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Fetching recent price data for momentum") - prices = get_prices(ticker, start_date=start_date, end_date=end_date, api_key=api_key) + prices = get_prices(ticker, start_date=start_date, end_date=end_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Analyzing growth & momentum") growth_momentum_analysis = analyze_growth_and_momentum(financial_line_items, prices) diff --git a/src/agents/technicals.py b/src/agents/technicals.py index 8bcdbd9ed..541031a76 100644 --- a/src/agents/technicals.py +++ b/src/agents/technicals.py @@ -9,6 +9,7 @@ import numpy as np from src.tools.api import get_prices, prices_to_df +from src.data.providers import 
get_data_provider_for_agent from src.utils.progress import progress @@ -46,6 +47,8 @@ def technical_analyst_agent(state: AgentState, agent_id: str = "technical_analys end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) # Initialize analysis for each ticker technical_analysis = {} @@ -58,6 +61,7 @@ def technical_analyst_agent(state: AgentState, agent_id: str = "technical_analys start_date=start_date, end_date=end_date, api_key=api_key, + data_provider=data_provider, ) if not prices: diff --git a/src/agents/valuation.py b/src/agents/valuation.py index f6e4d0f11..c6dbad4a3 100644 --- a/src/agents/valuation.py +++ b/src/agents/valuation.py @@ -17,6 +17,7 @@ get_market_cap, search_line_items, ) +from src.data.providers import get_data_provider_for_agent def valuation_analyst_agent(state: AgentState, agent_id: str = "valuation_analyst_agent"): """Run valuation across tickers and write signals back to `state`.""" @@ -25,6 +26,8 @@ def valuation_analyst_agent(state: AgentState, agent_id: str = "valuation_analys end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) valuation_analysis: dict[str, dict] = {} for ticker in tickers: @@ -37,6 +40,7 @@ def valuation_analyst_agent(state: AgentState, agent_id: str = "valuation_analys period="ttm", limit=8, api_key=api_key, + data_provider=data_provider, ) if not financial_metrics: progress.update_status(agent_id, ticker, "Failed: No financial metrics found") @@ -58,6 +62,7 @@ def valuation_analyst_agent(state: AgentState, agent_id: str = "valuation_analys period="ttm", limit=2, api_key=api_key, + data_provider=data_provider, ) if len(line_items) < 2: 
progress.update_status(agent_id, ticker, "Failed: Insufficient financial line items") @@ -67,13 +72,16 @@ def valuation_analyst_agent(state: AgentState, agent_id: str = "valuation_analys # ------------------------------------------------------------------ # Valuation models # ------------------------------------------------------------------ - wc_change = li_curr.working_capital - li_prev.working_capital + # Calculate working capital change (current_assets - current_liabilities) + wc_curr = (getattr(li_curr, 'current_assets', 0) or 0) - (getattr(li_curr, 'current_liabilities', 0) or 0) + wc_prev = (getattr(li_prev, 'current_assets', 0) or 0) - (getattr(li_prev, 'current_liabilities', 0) or 0) + wc_change = wc_curr - wc_prev # Owner Earnings owner_val = calculate_owner_earnings_value( - net_income=li_curr.net_income, - depreciation=li_curr.depreciation_and_amortization, - capex=li_curr.capital_expenditure, + net_income=getattr(li_curr, 'net_income', 0) or 0, + depreciation=getattr(li_curr, 'depreciation_and_amortization', 0) or 0, + capex=getattr(li_curr, 'capital_expenditure', 0) or 0, working_capital_change=wc_change, growth_rate=most_recent_metrics.earnings_growth or 0.05, ) @@ -101,7 +109,7 @@ def valuation_analyst_agent(state: AgentState, agent_id: str = "valuation_analys # ------------------------------------------------------------------ # Aggregate & signal # ------------------------------------------------------------------ - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) if not market_cap: progress.update_status(agent_id, ticker, "Failed: Market cap unavailable") continue diff --git a/src/agents/warren_buffett.py b/src/agents/warren_buffett.py index 34b11dc3b..f749fe02d 100644 --- a/src/agents/warren_buffett.py +++ b/src/agents/warren_buffett.py @@ -5,9 +5,15 @@ import json from typing_extensions import Literal from src.tools.api import 
get_financial_metrics, get_market_cap, search_line_items +from src.data.providers import get_data_provider_for_agent from src.utils.llm import call_llm from src.utils.progress import progress from src.utils.api_key import get_api_key_from_state +from src.utils.financial_data import ( + safe_get_numeric_field, + calculate_working_capital_change, + validate_required_fields +) class WarrenBuffettSignal(BaseModel): signal: Literal["bullish", "bearish", "neutral"] @@ -21,6 +27,8 @@ def warren_buffett_agent(state: AgentState, agent_id: str = "warren_buffett_agen end_date = data["end_date"] tickers = data["tickers"] api_key = get_api_key_from_state(state, "FINANCIAL_DATASETS_API_KEY") + # Use centralized data provider configuration + data_provider = get_data_provider_for_agent(state, agent_id) # Collect all analysis for LLM reasoning analysis_data = {} buffett_analysis = {} @@ -28,7 +36,7 @@ def warren_buffett_agent(state: AgentState, agent_id: str = "warren_buffett_agen for ticker in tickers: progress.update_status(agent_id, ticker, "Fetching financial metrics") # Fetch required data - request more periods for better trend analysis - metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=10, api_key=api_key) + metrics = get_financial_metrics(ticker, end_date, period="ttm", limit=10, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Gathering financial line items") financial_line_items = search_line_items( @@ -51,11 +59,12 @@ def warren_buffett_agent(state: AgentState, agent_id: str = "warren_buffett_agen period="ttm", limit=10, api_key=api_key, + data_provider=data_provider, ) progress.update_status(agent_id, ticker, "Getting market cap") # Get current market cap - market_cap = get_market_cap(ticker, end_date, api_key=api_key) + market_cap = get_market_cap(ticker, end_date, api_key=api_key, data_provider=data_provider) progress.update_status(agent_id, ticker, "Analyzing fundamentals") # Analyze fundamentals @@ -210,7 
+219,7 @@ def analyze_consistency(financial_line_items: list) -> dict[str, any]: reasoning = [] # Check earnings growth trend - earnings_values = [item.net_income for item in financial_line_items if item.net_income] + earnings_values = [getattr(item, 'net_income', None) for item in financial_line_items if getattr(item, 'net_income', None)] if len(earnings_values) >= 4: # Simple check: is each period's earnings bigger than the next? earnings_growth = all(earnings_values[i] > earnings_values[i + 1] for i in range(len(earnings_values) - 1)) @@ -383,36 +392,27 @@ def calculate_owner_earnings(financial_line_items: list) -> dict[str, any]: latest = financial_line_items[0] details = [] - # Core components - net_income = latest.net_income - depreciation = latest.depreciation_and_amortization - capex = latest.capital_expenditure - - if not all([net_income is not None, depreciation is not None, capex is not None]): - missing = [] - if net_income is None: missing.append("net income") - if depreciation is None: missing.append("depreciation") - if capex is None: missing.append("capital expenditure") - return {"owner_earnings": None, "details": [f"Missing components: {', '.join(missing)}"]} + # Core components - use safe field access + required_fields = ["net_income", "depreciation_and_amortization", "capital_expenditure"] + is_valid, missing_fields = validate_required_fields(latest, required_fields) + + if not is_valid: + return {"owner_earnings": None, "details": [f"Missing components: {', '.join(missing_fields)}"]} + + net_income = safe_get_numeric_field(latest, "net_income") + depreciation = safe_get_numeric_field(latest, "depreciation_and_amortization") + capex = safe_get_numeric_field(latest, "capital_expenditure") # Enhanced maintenance capex estimation using historical analysis maintenance_capex = estimate_maintenance_capex(financial_line_items) - # Working capital change analysis (if data available) + # Working capital change analysis - use utility function 
working_capital_change = 0 if len(financial_line_items) >= 2: try: - current_assets_current = getattr(latest, 'current_assets', None) - current_liab_current = getattr(latest, 'current_liabilities', None) - previous = financial_line_items[1] - current_assets_previous = getattr(previous, 'current_assets', None) - current_liab_previous = getattr(previous, 'current_liabilities', None) - - if all([current_assets_current, current_liab_current, current_assets_previous, current_liab_previous]): - wc_current = current_assets_current - current_liab_current - wc_previous = current_assets_previous - current_liab_previous - working_capital_change = wc_current - wc_previous + working_capital_change = calculate_working_capital_change(latest, financial_line_items[1]) + if working_capital_change != 0: details.append(f"Working capital change: ${working_capital_change:,.0f}") except: pass # Skip working capital adjustment if data unavailable @@ -465,14 +465,15 @@ def estimate_maintenance_capex(financial_line_items: list) -> float: capex_ratio = abs(item.capital_expenditure) / item.revenue capex_ratios.append(capex_ratio) - if hasattr(item, 'depreciation_and_amortization') and item.depreciation_and_amortization: - depreciation_values.append(item.depreciation_and_amortization) + depreciation_value = safe_get_numeric_field(item, 'depreciation_and_amortization') + if depreciation_value > 0: + depreciation_values.append(depreciation_value) # Approach 2: Percentage of depreciation (typically 80-120% for maintenance) - latest_depreciation = financial_line_items[0].depreciation_and_amortization if financial_line_items[0].depreciation_and_amortization else 0 + latest_depreciation = safe_get_numeric_field(financial_line_items[0], "depreciation_and_amortization") # Approach 3: Industry-specific heuristics - latest_capex = abs(financial_line_items[0].capital_expenditure) if financial_line_items[0].capital_expenditure else 0 + latest_capex = abs(safe_get_numeric_field(financial_line_items[0], 
"capital_expenditure")) # Conservative estimate: Use the higher of: # 1. 85% of total capex (assuming 15% is growth capex) diff --git a/src/data/providers.py b/src/data/providers.py new file mode 100644 index 000000000..fddc346ee --- /dev/null +++ b/src/data/providers.py @@ -0,0 +1,98 @@ +"""Data provider configuration and utilities.""" + +from enum import Enum +from pydantic import BaseModel +from typing import Tuple + + +class DataProvider(str, Enum): + """Enum for supported data providers""" + + FINANCIAL_DATASETS = "FinancialDatasets" + YFINANCE = "Yahoo Finance" + + +class DataSourceModel(BaseModel): + """Represents a data source configuration""" + + display_name: str + provider: DataProvider + requires_api_key: bool + description: str + + def to_choice_tuple(self) -> Tuple[str, str]: + """Convert to format needed for questionary choices""" + return (self.display_name, self.provider.value) + + +# Data source configurations +DATA_SOURCES = { + "financial_datasets": DataSourceModel( + display_name="Financial Datasets API (Premium)", + provider=DataProvider.FINANCIAL_DATASETS, + requires_api_key=True, + description="Professional grade financial data with extended coverage" + ), + "yfinance": DataSourceModel( + display_name="Yahoo Finance (Free)", + provider=DataProvider.YFINANCE, + requires_api_key=False, + description="Free financial data from Yahoo Finance" + ), +} + +# Order for display in CLI (yfinance first as default) +DATA_SOURCE_ORDER = [ + ( + DATA_SOURCES["yfinance"].display_name, + "yfinance", + DATA_SOURCES["yfinance"].provider.value + ), + ( + DATA_SOURCES["financial_datasets"].display_name, + "financial_datasets", + DATA_SOURCES["financial_datasets"].provider.value + ), +] + + +def get_data_source_info(provider_key: str) -> DataSourceModel: + """Get data source configuration by key""" + return DATA_SOURCES.get(provider_key) + + +def get_default_data_provider() -> str: + """Get the default data provider for the system""" + return "yfinance" # Default 
to free option + + +def get_data_provider_for_agent(state: dict, agent_id: str = None) -> str: + """ + Get the appropriate data provider for an agent from state. + Centralizes data provider logic and removes hardcoded defaults from agents. + + Args: + state: Agent state containing metadata + agent_id: Optional agent ID for agent-specific overrides + + Returns: + str: Data provider key (e.g., "yfinance", "financial_datasets") + """ + # Check if data provider is specified in metadata + metadata = state.get("metadata", {}) + data_provider = metadata.get("data_provider") + + if data_provider: + return data_provider + + # Agent-specific defaults (if needed) + agent_specific_defaults = { + "technicals_agent": "financial_datasets", + "technical_analyst_agent": "financial_datasets", + } + + if agent_id and agent_id in agent_specific_defaults: + return agent_specific_defaults[agent_id] + + # System default + return get_default_data_provider() \ No newline at end of file diff --git a/src/llm/models.py b/src/llm/models.py index aea524c49..2d9605227 100644 --- a/src/llm/models.py +++ b/src/llm/models.py @@ -168,6 +168,8 @@ def get_model(model_name: str, model_provider: ModelProvider, api_keys: dict = N return ChatOllama( model=model_name, base_url=base_url, + timeout=120, # 2 minute timeout to prevent hanging + temperature=0.1, # Lower temperature for more consistent JSON output ) elif model_provider == ModelProvider.OPENROUTER: api_key = (api_keys or {}).get("OPENROUTER_API_KEY") or os.getenv("OPENROUTER_API_KEY") diff --git a/src/main.py b/src/main.py index 4df62891b..01acdcfe7 100644 --- a/src/main.py +++ b/src/main.py @@ -13,6 +13,7 @@ from src.utils.progress import progress from src.llm.models import LLM_ORDER, OLLAMA_LLM_ORDER, get_model_info, ModelProvider from src.utils.ollama import ensure_ollama_and_model +from src.data.providers import DATA_SOURCE_ORDER, get_data_source_info import argparse from datetime import datetime @@ -51,6 +52,7 @@ def run_hedge_fund( 
selected_analysts: list[str] = [], model_name: str = "gpt-4.1", model_provider: str = "OpenAI", + data_provider: str = "yfinance", ): # Start progress tracking progress.start() @@ -81,6 +83,7 @@ def run_hedge_fund( "show_reasoning": show_reasoning, "model_name": model_name, "model_provider": model_provider, + "data_provider": data_provider, }, }, ) @@ -176,6 +179,28 @@ def create_workflow(selected_analysts=None): selected_analysts = choices print(f"\nSelected analysts: {', '.join(Fore.GREEN + choice.title().replace('_', ' ') + Style.RESET_ALL for choice in choices)}\n") + # Select data provider + data_provider_choice = questionary.select( + "Select your data provider:", + choices=[questionary.Choice(display, value=key) for display, key, provider in DATA_SOURCE_ORDER], + style=questionary.Style( + [ + ("selected", "fg:cyan bold"), + ("pointer", "fg:cyan bold"), + ("highlighted", "fg:cyan"), + ("answer", "fg:cyan bold"), + ] + ), + ).ask() + + if not data_provider_choice: + print("\n\nInterrupt received. 
Exiting...") + sys.exit(0) + + data_source_info = get_data_source_info(data_provider_choice) + print(f"\nSelected data provider: {Fore.CYAN + Style.BRIGHT}{data_source_info.display_name}{Style.RESET_ALL}") + print(f"Description: {data_source_info.description}\n") + # Select LLM model based on whether Ollama is being used model_name = "" model_provider = "" @@ -317,5 +342,6 @@ def create_workflow(selected_analysts=None): selected_analysts=selected_analysts, model_name=model_name, model_provider=model_provider, + data_provider=data_provider_choice, ) print_trading_output(result) diff --git a/src/tools/api.py b/src/tools/api.py index 60ccbdc9b..53bc68fbf 100644 --- a/src/tools/api.py +++ b/src/tools/api.py @@ -3,8 +3,14 @@ import pandas as pd import requests import time +try: + import yfinance as yf + YFINANCE_AVAILABLE = True +except ImportError: + YFINANCE_AVAILABLE = False from src.data.cache import get_cache +from src.data.providers import DataProvider from src.data.models import ( CompanyNews, CompanyNewsResponse, @@ -57,29 +63,64 @@ def _make_api_request(url: str, headers: dict, method: str = "GET", json_data: d return response -def get_prices(ticker: str, start_date: str, end_date: str, api_key: str = None) -> list[Price]: +def get_prices_yfinance(ticker: str, start_date: str, end_date: str) -> list[Price]: + """Fetch price data from Yahoo Finance using yfinance.""" + if not YFINANCE_AVAILABLE: + raise Exception("yfinance is not installed. 
Please install it with: pip install yfinance") + + try: + # Download data from yfinance + data = yf.download(ticker, start=start_date, end=end_date, progress=False, auto_adjust=False) + + if data.empty: + return [] + + # Convert to our Price model format + prices = [] + for date_idx, row in data.iterrows(): + price = Price( + open=row['Open'], + high=row['High'], + low=row['Low'], + close=row['Close'], + volume=row['Volume'], + time=date_idx.strftime('%Y-%m-%d') + ) + prices.append(price) + + return prices + + except Exception as e: + raise Exception(f"Error fetching data from Yahoo Finance for {ticker}: {str(e)}") + + +def get_prices(ticker: str, start_date: str, end_date: str, api_key: str = None, data_provider: str = "financial_datasets") -> list[Price]: """Fetch price data from cache or API.""" # Create a cache key that includes all parameters to ensure exact matches - cache_key = f"{ticker}_{start_date}_{end_date}" + cache_key = f"{ticker}_{start_date}_{end_date}_{data_provider}" # Check cache first - simple exact match if cached_data := _cache.get_prices(cache_key): return [Price(**price) for price in cached_data] - # If not in cache, fetch from API - headers = {} - financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") - if financial_api_key: - headers["X-API-KEY"] = financial_api_key + # Route to appropriate data provider + if data_provider == "yfinance": + prices = get_prices_yfinance(ticker, start_date, end_date) + else: + # Default to Financial Datasets API + headers = {} + financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") + if financial_api_key: + headers["X-API-KEY"] = financial_api_key - url = f"https://api.financialdatasets.ai/prices/?ticker={ticker}&interval=day&interval_multiplier=1&start_date={start_date}&end_date={end_date}" - response = _make_api_request(url, headers) - if response.status_code != 200: - raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") + 
url = f"https://api.financialdatasets.ai/prices/?ticker={ticker}&interval=day&interval_multiplier=1&start_date={start_date}&end_date={end_date}" + response = _make_api_request(url, headers) + if response.status_code != 200: + raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") - # Parse response with Pydantic model - price_response = PriceResponse(**response.json()) - prices = price_response.prices + # Parse response with Pydantic model + price_response = PriceResponse(**response.json()) + prices = price_response.prices if not prices: return [] @@ -89,35 +130,150 @@ def get_prices(ticker: str, start_date: str, end_date: str, api_key: str = None) return prices +def get_financial_metrics_yfinance(ticker: str, end_date: str = None) -> list[FinancialMetrics]: + """Fetch financial metrics from Yahoo Finance using yfinance.""" + if not YFINANCE_AVAILABLE: + raise Exception("yfinance is not installed. Please install it with: pip install yfinance") + + try: + ticker_obj = yf.Ticker(ticker) + info = ticker_obj.info + + # Defensive check: ensure info is a dict and not None + if not info or not isinstance(info, dict): + raise Exception(f"No valid info data returned for ticker {ticker}") + + # Helper function to safely get numeric values + def safe_get_numeric(key: str, default=None): + """Safely get numeric values from info dict, handling None cases.""" + value = info.get(key, default) + # Ensure we don't pass None where a number is expected for calculations + if value is None: + return None + try: + # Ensure it's a valid number + return float(value) if value is not None else None + except (ValueError, TypeError): + return None + + # Helper function to safely get string values + def safe_get_string(key: str, default='USD'): + """Safely get string values from info dict.""" + value = info.get(key, default) + if value is None: + return default + try: + str_value = str(value) + # Ensure the string is not empty and contains valid characters 
+ return str_value if str_value and str_value.strip() else default + except (ValueError, TypeError): + return default + + # Create a single FinancialMetrics object with available data + metric = FinancialMetrics( + ticker=ticker, + report_period="ttm", + period="ttm", + currency=safe_get_string('currency', 'USD'), + + # Valuation metrics + market_cap=safe_get_numeric('marketCap'), + enterprise_value=safe_get_numeric('enterpriseValue'), + price_to_earnings_ratio=safe_get_numeric('trailingPE'), + price_to_book_ratio=safe_get_numeric('priceToBook'), + price_to_sales_ratio=safe_get_numeric('priceToSalesTrailing12Months'), + enterprise_value_to_ebitda_ratio=safe_get_numeric('enterpriseToEbitda'), + enterprise_value_to_revenue_ratio=safe_get_numeric('enterpriseToRevenue'), + + # Set unavailable fields to None + free_cash_flow_yield=None, + peg_ratio=safe_get_numeric('pegRatio'), + + # Profitability metrics + gross_margin=safe_get_numeric('grossMargins'), + operating_margin=safe_get_numeric('operatingMargins'), + net_margin=safe_get_numeric('profitMargins'), # profit_margin is same as net_margin + + # Returns + return_on_equity=safe_get_numeric('returnOnEquity'), + return_on_assets=safe_get_numeric('returnOnAssets'), + return_on_invested_capital=None, # Not available in yfinance + + # Efficiency metrics (not available in yfinance info) + asset_turnover=None, + inventory_turnover=None, + receivables_turnover=None, + days_sales_outstanding=None, + operating_cycle=None, + working_capital_turnover=None, + + # Liquidity metrics + current_ratio=safe_get_numeric('currentRatio'), + quick_ratio=safe_get_numeric('quickRatio'), + cash_ratio=None, # Not available in yfinance + operating_cash_flow_ratio=None, # Not available in yfinance + + # Leverage metrics + debt_to_equity=safe_get_numeric('debtToEquity'), + debt_to_assets=None, # Not available in yfinance + interest_coverage=None, # Not available in yfinance + + # Growth metrics + revenue_growth=safe_get_numeric('revenueGrowth'), 
+ earnings_growth=safe_get_numeric('earningsGrowth'), + book_value_growth=None, # Not available in yfinance + earnings_per_share_growth=None, # Not available in yfinance + free_cash_flow_growth=None, # Not available in yfinance + operating_income_growth=None, # Not available in yfinance + ebitda_growth=None, # Not available in yfinance + + # Other metrics + payout_ratio=safe_get_numeric('payoutRatio'), + earnings_per_share=safe_get_numeric('trailingEps'), + book_value_per_share=safe_get_numeric('bookValue'), + free_cash_flow_per_share=None, # Not available in yfinance + ) + + return [metric] if metric else [] + + except Exception as e: + raise Exception(f"Error fetching financial metrics from Yahoo Finance for {ticker}: {str(e)}") + + def get_financial_metrics( ticker: str, end_date: str, period: str = "ttm", limit: int = 10, api_key: str = None, + data_provider: str = "financial_datasets", ) -> list[FinancialMetrics]: """Fetch financial metrics from cache or API.""" # Create a cache key that includes all parameters to ensure exact matches - cache_key = f"{ticker}_{period}_{end_date}_{limit}" + cache_key = f"{ticker}_{period}_{end_date}_{limit}_{data_provider}" # Check cache first - simple exact match if cached_data := _cache.get_financial_metrics(cache_key): return [FinancialMetrics(**metric) for metric in cached_data] - # If not in cache, fetch from API - headers = {} - financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") - if financial_api_key: - headers["X-API-KEY"] = financial_api_key + # Route to appropriate data provider + if data_provider == "yfinance": + financial_metrics = get_financial_metrics_yfinance(ticker, end_date) + else: + # Default to Financial Datasets API + headers = {} + financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") + if financial_api_key: + headers["X-API-KEY"] = financial_api_key - url = 
f"https://api.financialdatasets.ai/financial-metrics/?ticker={ticker}&report_period_lte={end_date}&limit={limit}&period={period}" - response = _make_api_request(url, headers) - if response.status_code != 200: - raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") + url = f"https://api.financialdatasets.ai/financial-metrics/?ticker={ticker}&report_period_lte={end_date}&limit={limit}&period={period}" + response = _make_api_request(url, headers) + if response.status_code != 200: + raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") - # Parse response with Pydantic model - metrics_response = FinancialMetricsResponse(**response.json()) - financial_metrics = metrics_response.financial_metrics + # Parse response with Pydantic model + metrics_response = FinancialMetricsResponse(**response.json()) + financial_metrics = metrics_response.financial_metrics if not financial_metrics: return [] @@ -127,6 +283,136 @@ def get_financial_metrics( return financial_metrics +def search_line_items_yfinance( + ticker: str, + line_items: list[str], + end_date: str = None, + period: str = "ttm", + limit: int = 10, +) -> list[LineItem]: + """Fetch line items from Yahoo Finance using yfinance.""" + if not YFINANCE_AVAILABLE: + raise Exception("yfinance is not installed. 
Please install it with: pip install yfinance") + + try: + ticker_obj = yf.Ticker(ticker) + + # Get financial statements based on period + if period == "ttm" or period == "annual": + income_stmt = ticker_obj.income_stmt + balance_sheet = ticker_obj.balance_sheet + cashflow = ticker_obj.cashflow + else: + income_stmt = ticker_obj.quarterly_income_stmt + balance_sheet = ticker_obj.quarterly_balance_sheet + cashflow = ticker_obj.quarterly_cashflow + + # Mapping of common line items to yfinance field names + line_item_mapping = { + # Revenue & Income + "revenue": "Total Revenue", + "total_revenue": "Total Revenue", + "net_income": "Net Income", + "operating_income": "Operating Income", + "ebit": "EBIT", + "ebitda": "EBITDA", + "gross_margin": "Gross Profit", + "operating_margin": "Operating Income", + + # Per Share Metrics + "earnings_per_share": "Diluted EPS", + "book_value_per_share": "Stockholders Equity", + "outstanding_shares": "Share Issued", + + # Balance Sheet Items + "total_assets": "Total Assets", + "total_liabilities": "Total Liabilities Net Minority Interest", + "current_assets": "Current Assets", + "current_liabilities": "Current Liabilities", + "cash_and_equivalents": "Cash And Cash Equivalents", + "total_debt": "Total Debt", + "goodwill_and_intangible_assets": "Goodwill And Other Intangible Assets", + "intangible_assets": "Other Intangible Assets", + "shareholders_equity": "Stockholders Equity", + + # Cash Flow Items + "free_cash_flow": "Free Cash Flow", + "capital_expenditure": "Capital Expenditure", + "depreciation_and_amortization": "Depreciation And Amortization", + "dividends_and_other_cash_distributions": "Cash Dividends Paid", + "issuance_or_purchase_of_equity_shares": "Common Stock Issuance", + + # Expense Items + "operating_expense": "Total Expenses", + "research_and_development": "Research And Development", + "interest_expense": "Interest Expense", + + # Financial Ratios & Metrics (these come from financial metrics API, not line items) + 
"debt_to_equity": "Total Debt", # Will need calculation + "asset_turnover": None, # Not available in line items + "beta": None, # Not available in line items + "ev_to_ebit": None, # Not available in line items + "return_on_invested_capital": None, # Not available in line items + } + + # Get all unique periods from the data + all_periods = set() + if income_stmt is not None and not income_stmt.empty: + all_periods.update(income_stmt.columns) + if balance_sheet is not None and not balance_sheet.empty: + all_periods.update(balance_sheet.columns) + if cashflow is not None and not cashflow.empty: + all_periods.update(cashflow.columns) + + # Sort periods (most recent first) + sorted_periods = sorted(all_periods, reverse=True)[:limit] + + results = [] + + for period_date in sorted_periods: + line_item_data = { + "ticker": ticker, + "report_period": str(period_date.date()) if hasattr(period_date, 'date') else str(period_date), + "period": period, + "currency": 'USD', # Default currency + } + + # Collect all requested line items for this period + for item in line_items: + yfinance_name = line_item_mapping.get(item, item) + value = None + + # Skip if yfinance_name is None (not available in yfinance) + if yfinance_name is None: + continue + + # Search in income statement first + if income_stmt is not None and yfinance_name in income_stmt.index and period_date in income_stmt.columns: + value = income_stmt.loc[yfinance_name, period_date] + + # Then balance sheet + elif balance_sheet is not None and yfinance_name in balance_sheet.index and period_date in balance_sheet.columns: + value = balance_sheet.loc[yfinance_name, period_date] + + # Then cash flow + elif cashflow is not None and yfinance_name in cashflow.index and period_date in cashflow.columns: + value = cashflow.loc[yfinance_name, period_date] + + # Add to line_item_data if value is valid + if value is not None and not pd.isna(value): + line_item_data[item] = float(value) + + # Only create LineItem if we have at least 
one valid value + if len(line_item_data) > 4: # More than just the base fields + line_item = LineItem(**line_item_data) + results.append(line_item) + + return results + + except Exception as e: + raise Exception(f"Error fetching line items from Yahoo Finance for {ticker}: {str(e)}") + + def search_line_items( ticker: str, line_items: list[str], @@ -134,34 +420,84 @@ def search_line_items( period: str = "ttm", limit: int = 10, api_key: str = None, + data_provider: str = "financial_datasets", ) -> list[LineItem]: - """Fetch line items from API.""" - # If not in cache or insufficient data, fetch from API - headers = {} - financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") - if financial_api_key: - headers["X-API-KEY"] = financial_api_key - - url = "https://api.financialdatasets.ai/financials/search/line-items" - - body = { - "tickers": [ticker], - "line_items": line_items, - "end_date": end_date, - "period": period, - "limit": limit, - } - response = _make_api_request(url, headers, method="POST", json_data=body) - if response.status_code != 200: - raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") - data = response.json() - response_model = LineItemResponse(**data) - search_results = response_model.search_results - if not search_results: - return [] + """Fetch line items from cache or API.""" + # Route to appropriate data provider + if data_provider == "yfinance": + return search_line_items_yfinance(ticker, line_items, end_date, period, limit) + else: + # Default to Financial Datasets API + headers = {} + financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") + if financial_api_key: + headers["X-API-KEY"] = financial_api_key - # Cache the results - return search_results[:limit] + url = "https://api.financialdatasets.ai/financials/search/line-items" + + body = { + "tickers": [ticker], + "line_items": line_items, + "end_date": end_date, + "period": period, + "limit": limit, + } + 
response = _make_api_request(url, headers, method="POST", json_data=body) + if response.status_code != 200: + raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") + data = response.json() + response_model = LineItemResponse(**data) + search_results = response_model.search_results + if not search_results: + return [] + + # Cache the results + return search_results[:limit] + + +def get_insider_trades_yfinance( + ticker: str, + end_date: str = None, + start_date: str = None, + limit: int = 1000, +) -> list[InsiderTrade]: + """Fetch insider trades from Yahoo Finance using yfinance.""" + if not YFINANCE_AVAILABLE: + raise Exception("yfinance is not installed. Please install it with: pip install yfinance") + + try: + ticker_obj = yf.Ticker(ticker) + insider_data = ticker_obj.insider_transactions + + if insider_data is None or insider_data.empty: + return [] + + trades = [] + for _, row in insider_data.iterrows(): + # Basic data mapping - yfinance has limited insider trade details + trade = InsiderTrade( + ticker=ticker, + issuer=None, # Not available in yfinance + name=str(row.get('Insider', '')), + title=str(row.get('Position', '')), + is_board_director=None, # Not available in yfinance + transaction_date=str(row.get('Start Date', '')), + transaction_shares=int(row.get('Shares', 0)) if pd.notnull(row.get('Shares')) else 0, + transaction_price_per_share=None, # Not available in yfinance + transaction_value=float(row.get('Value', 0)) if pd.notnull(row.get('Value')) else 0.0, + shares_owned_before_transaction=None, # Not available in yfinance + shares_owned_after_transaction=None, # Not available in yfinance + security_title=None, # Not available in yfinance + filing_date=str(row.get('Start Date', '')), # Use same date for filing + ) + trades.append(trade) + + return trades[:limit] + + except Exception as e: + # Return empty list instead of raising exception for non-critical data + print(f"Warning: Could not fetch insider trades from 
Yahoo Finance for {ticker}: {str(e)}") + return [] def get_insider_trades( @@ -170,53 +506,58 @@ def get_insider_trades( start_date: str | None = None, limit: int = 1000, api_key: str = None, + data_provider: str = "financial_datasets", ) -> list[InsiderTrade]: """Fetch insider trades from cache or API.""" # Create a cache key that includes all parameters to ensure exact matches - cache_key = f"{ticker}_{start_date or 'none'}_{end_date}_{limit}" + cache_key = f"{ticker}_{start_date or 'none'}_{end_date}_{limit}_{data_provider}" # Check cache first - simple exact match if cached_data := _cache.get_insider_trades(cache_key): return [InsiderTrade(**trade) for trade in cached_data] - # If not in cache, fetch from API - headers = {} - financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") - if financial_api_key: - headers["X-API-KEY"] = financial_api_key + # Route to appropriate data provider + if data_provider == "yfinance": + all_trades = get_insider_trades_yfinance(ticker, end_date, start_date, limit) + else: + # Default to Financial Datasets API + headers = {} + financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") + if financial_api_key: + headers["X-API-KEY"] = financial_api_key - all_trades = [] - current_end_date = end_date + all_trades = [] + current_end_date = end_date - while True: - url = f"https://api.financialdatasets.ai/insider-trades/?ticker={ticker}&filing_date_lte={current_end_date}" - if start_date: - url += f"&filing_date_gte={start_date}" - url += f"&limit={limit}" + while True: + url = f"https://api.financialdatasets.ai/insider-trades/?ticker={ticker}&filing_date_lte={current_end_date}" + if start_date: + url += f"&filing_date_gte={start_date}" + url += f"&limit={limit}" - response = _make_api_request(url, headers) - if response.status_code != 200: - raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") + response = _make_api_request(url, headers) + if 
response.status_code != 200: + raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") - data = response.json() - response_model = InsiderTradeResponse(**data) - insider_trades = response_model.insider_trades + data = response.json() + response_model = InsiderTradeResponse(**data) + insider_trades = response_model.insider_trades - if not insider_trades: - break + if not insider_trades: + break - all_trades.extend(insider_trades) + all_trades.extend(insider_trades) - # Only continue pagination if we have a start_date and got a full page - if not start_date or len(insider_trades) < limit: - break + # Only continue pagination if we have a start_date and got a full page + if not start_date or len(insider_trades) < limit: + break - # Update end_date to the oldest filing date from current batch for next iteration - current_end_date = min(trade.filing_date for trade in insider_trades).split("T")[0] + # Update end_date to the oldest filing date from current batch for next iteration + current_end_date = min(trade.filing_date for trade in insider_trades).split("T")[0] - # If we've reached or passed the start_date, we can stop - if current_end_date <= start_date: - break + # If we've reached or passed the start_date, we can stop + if current_end_date <= start_date: + break if not all_trades: return [] @@ -226,102 +567,168 @@ def get_insider_trades( return all_trades +def get_company_news_yfinance( + ticker: str, + end_date: str = None, + start_date: str = None, + limit: int = 1000, +) -> list[CompanyNews]: + """Fetch company news from Yahoo Finance using yfinance.""" + if not YFINANCE_AVAILABLE: + raise Exception("yfinance is not installed. 
Please install it with: pip install yfinance") + + try: + ticker_obj = yf.Ticker(ticker) + news_data = ticker_obj.news + + if not news_data: + return [] + + news_items = [] + for item in news_data[:limit]: + content = item.get('content', {}) + if content: + news = CompanyNews( + ticker=ticker, + title=content.get('title', ''), + author='', # yfinance doesn't provide author info + source=content.get('publisher', {}).get('name', '') if isinstance(content.get('publisher'), dict) else str(content.get('publisher', '')), + url=content.get('clickThroughUrl', {}).get('url', '') if isinstance(content.get('clickThroughUrl'), dict) else str(content.get('clickThroughUrl', '')), + date=content.get('publishedAt', ''), + sentiment='neutral' # yfinance doesn't provide sentiment + ) + news_items.append(news) + + return news_items + + except Exception as e: + # Return empty list instead of raising exception for non-critical data + print(f"Warning: Could not fetch company news from Yahoo Finance for {ticker}: {str(e)}") + return [] + + def get_company_news( ticker: str, end_date: str, start_date: str | None = None, limit: int = 1000, api_key: str = None, + data_provider: str = "financial_datasets", ) -> list[CompanyNews]: """Fetch company news from cache or API.""" # Create a cache key that includes all parameters to ensure exact matches - cache_key = f"{ticker}_{start_date or 'none'}_{end_date}_{limit}" + cache_key = f"{ticker}_{start_date or 'none'}_{end_date}_{limit}_{data_provider}" # Check cache first - simple exact match if cached_data := _cache.get_company_news(cache_key): return [CompanyNews(**news) for news in cached_data] - # If not in cache, fetch from API - headers = {} - financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") - if financial_api_key: - headers["X-API-KEY"] = financial_api_key + # Route to appropriate data provider + if data_provider == "yfinance": + all_news = get_company_news_yfinance(ticker, end_date, start_date, limit) + else: + # 
Default to Financial Datasets API + headers = {} + financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") + if financial_api_key: + headers["X-API-KEY"] = financial_api_key - all_news = [] - current_end_date = end_date + all_news = [] + current_end_date = end_date - while True: - url = f"https://api.financialdatasets.ai/news/?ticker={ticker}&end_date={current_end_date}" - if start_date: - url += f"&start_date={start_date}" - url += f"&limit={limit}" + while True: + url = f"https://api.financialdatasets.ai/news/?ticker={ticker}&end_date={current_end_date}" + if start_date: + url += f"&start_date={start_date}" + url += f"&limit={limit}" - response = _make_api_request(url, headers) - if response.status_code != 200: - raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") + response = _make_api_request(url, headers) + if response.status_code != 200: + raise Exception(f"Error fetching data: {ticker} - {response.status_code} - {response.text}") - data = response.json() - response_model = CompanyNewsResponse(**data) - company_news = response_model.news + data = response.json() + response_model = CompanyNewsResponse(**data) + company_news = response_model.news - if not company_news: - break + if not company_news: + break - all_news.extend(company_news) + all_news.extend(company_news) - # Only continue pagination if we have a start_date and got a full page - if not start_date or len(company_news) < limit: - break + # Only continue pagination if we have a start_date and got a full page + if not start_date or len(company_news) < limit: + break - # Update end_date to the oldest date from current batch for next iteration - current_end_date = min(news.date for news in company_news).split("T")[0] + # Update end_date to the oldest date from current batch for next iteration + current_end_date = min(news.date for news in company_news).split("T")[0] - # If we've reached or passed the start_date, we can stop - if 
current_end_date <= start_date: - break + # If we've reached or passed the start_date, we can stop + if current_end_date <= start_date: + break - if not all_news: - return [] + if not all_news: + return [] # Cache the results using the comprehensive cache key _cache.set_company_news(cache_key, [news.model_dump() for news in all_news]) return all_news +def get_market_cap_yfinance(ticker: str, end_date: str = None) -> float | None: + """Fetch market cap from Yahoo Finance using yfinance.""" + if not YFINANCE_AVAILABLE: + raise Exception("yfinance is not installed. Please install it with: pip install yfinance") + + try: + ticker_obj = yf.Ticker(ticker) + info = ticker_obj.info + return info.get('marketCap') + + except Exception as e: + print(f"Warning: Could not fetch market cap from Yahoo Finance for {ticker}: {str(e)}") + return None + + def get_market_cap( ticker: str, end_date: str, api_key: str = None, + data_provider: str = "financial_datasets", ) -> float | None: """Fetch market cap from the API.""" - # Check if end_date is today - if end_date == datetime.datetime.now().strftime("%Y-%m-%d"): - # Get the market cap from company facts API - headers = {} - financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") - if financial_api_key: - headers["X-API-KEY"] = financial_api_key - - url = f"https://api.financialdatasets.ai/company/facts/?ticker={ticker}" - response = _make_api_request(url, headers) - if response.status_code != 200: - print(f"Error fetching company facts: {ticker} - {response.status_code}") + # Route to appropriate data provider + if data_provider == "yfinance": + return get_market_cap_yfinance(ticker, end_date) + else: + # Default to Financial Datasets API + # Check if end_date is today + if end_date == datetime.datetime.now().strftime("%Y-%m-%d"): + # Get the market cap from company facts API + headers = {} + financial_api_key = api_key or os.environ.get("FINANCIAL_DATASETS_API_KEY") + if financial_api_key: + 
headers["X-API-KEY"] = financial_api_key + + url = f"https://api.financialdatasets.ai/company/facts/?ticker={ticker}" + response = _make_api_request(url, headers) + if response.status_code != 200: + print(f"Error fetching company facts: {ticker} - {response.status_code}") + return None + + data = response.json() + response_model = CompanyFactsResponse(**data) + return response_model.company_facts.market_cap + + financial_metrics = get_financial_metrics(ticker, end_date, api_key=api_key, data_provider=data_provider) + if not financial_metrics: return None - data = response.json() - response_model = CompanyFactsResponse(**data) - return response_model.company_facts.market_cap - - financial_metrics = get_financial_metrics(ticker, end_date, api_key=api_key) - if not financial_metrics: - return None + market_cap = financial_metrics[0].market_cap - market_cap = financial_metrics[0].market_cap - - if not market_cap: - return None + if not market_cap: + return None - return market_cap + return market_cap def prices_to_df(prices: list[Price]) -> pd.DataFrame: diff --git a/src/utils/financial_data.py b/src/utils/financial_data.py new file mode 100644 index 000000000..4dd2f0fe5 --- /dev/null +++ b/src/utils/financial_data.py @@ -0,0 +1,125 @@ +"""Utilities for safe financial data access.""" + +from typing import Any, Union, Optional +from src.data.models import LineItem, FinancialMetrics + + +def safe_get_field( + data_item: Union[LineItem, FinancialMetrics], + field_name: str, + default: Any = None +) -> Any: + """ + Safely get a field from a financial data item with proper fallback. 
+ + Args: + data_item: LineItem or FinancialMetrics object + field_name: Name of the field to access + default: Default value if field doesn't exist or is None + + Returns: + Field value or default + """ + try: + value = getattr(data_item, field_name, default) + # Return default for None, NaN, or empty values + if value is None or (isinstance(value, float) and value != value): # NaN check + return default + return value + except (AttributeError, TypeError): + return default + + +def safe_get_numeric_field( + data_item: Union[LineItem, FinancialMetrics], + field_name: str, + default: float = 0.0 +) -> float: + """ + Safely get a numeric field with 0.0 as default. + + Args: + data_item: LineItem or FinancialMetrics object + field_name: Name of the field to access + default: Default numeric value (defaults to 0.0) + + Returns: + Numeric field value or default + """ + value = safe_get_field(data_item, field_name, default) + try: + return float(value) if value is not None else default + except (ValueError, TypeError): + return default + + +def calculate_working_capital_change( + current_item: LineItem, + previous_item: Optional[LineItem] = None +) -> float: + """ + Calculate working capital change between periods. + + Args: + current_item: Current period financial data + previous_item: Previous period financial data (optional) + + Returns: + Working capital change (0 if previous period not available) + """ + if previous_item is None: + return 0.0 + + current_wc = ( + safe_get_numeric_field(current_item, 'current_assets') - + safe_get_numeric_field(current_item, 'current_liabilities') + ) + + previous_wc = ( + safe_get_numeric_field(previous_item, 'current_assets') - + safe_get_numeric_field(previous_item, 'current_liabilities') + ) + + return current_wc - previous_wc + + +def calculate_shareholders_equity(data_item: Union[LineItem, FinancialMetrics]) -> float: + """ + Calculate shareholders equity from total assets and liabilities. 
+ + Args: + data_item: Financial data item + + Returns: + Shareholders equity or 0 if cannot be calculated + """ + total_assets = safe_get_numeric_field(data_item, 'total_assets') + total_liabilities = safe_get_numeric_field(data_item, 'total_liabilities') + + if total_assets > 0 and total_liabilities >= 0: + return total_assets - total_liabilities + return 0.0 + + +def validate_required_fields( + data_item: Union[LineItem, FinancialMetrics], + required_fields: list[str] +) -> tuple[bool, list[str]]: + """ + Validate that required fields are present and not None. + + Args: + data_item: Financial data item to validate + required_fields: List of required field names + + Returns: + Tuple of (is_valid, missing_fields) + """ + missing_fields = [] + + for field in required_fields: + value = safe_get_field(data_item, field) + if value is None: + missing_fields.append(field) + + return len(missing_fields) == 0, missing_fields \ No newline at end of file diff --git a/src/utils/llm.py b/src/utils/llm.py index c7535d5d2..8f14cc838 100644 --- a/src/utils/llm.py +++ b/src/utils/llm.py @@ -48,8 +48,8 @@ def call_llm( model_info = get_model_info(model_name, model_provider) llm = get_model(model_name, model_provider, api_keys) - # For non-JSON support models, we can use structured output - if not (model_info and not model_info.has_json_mode()): + # For models that support JSON mode, use structured output + if model_info and model_info.has_json_mode(): llm = llm.with_structured_output( pydantic_model, method="json_mode", @@ -61,13 +61,28 @@ def call_llm( # Call the LLM result = llm.invoke(prompt) - # For non-JSON support models, we need to extract and parse the JSON manually + # For models without JSON support, we need to extract and parse the JSON manually if model_info and not model_info.has_json_mode(): parsed_result = extract_json_from_response(result.content) if parsed_result: return pydantic_model(**parsed_result) + else: + # If we couldn't parse JSON, raise an exception to 
trigger retry + raise Exception("Could not extract valid JSON from response") else: - return result + # For models with JSON support, check if result is already a Pydantic model + if isinstance(result, pydantic_model): + return result + elif hasattr(result, 'content'): + # If it's an AIMessage or similar, extract the content and parse it + parsed_result = extract_json_from_response(result.content) + if parsed_result: + return pydantic_model(**parsed_result) + else: + raise Exception("Could not extract valid JSON from structured output response") + else: + # If it's neither, try to use it directly as a Pydantic model + return result except Exception as e: if agent_name: @@ -107,17 +122,82 @@ def create_default_response(model_class: type[BaseModel]) -> BaseModel: def extract_json_from_response(content: str) -> dict | None: - """Extracts JSON from markdown-formatted response.""" + """Extracts JSON from markdown-formatted response with improved error handling.""" + if not content: + return None + try: - json_start = content.find("```json") + # Try to parse the entire content as JSON first (in case it's pure JSON) + try: + return json.loads(content.strip()) + except json.JSONDecodeError: + pass + + # Try to find JSON block with multiple patterns + patterns = ["```json", "```JSON", "json```", "```"] + json_start = -1 + pattern_used = "" + + for pattern in patterns: + idx = content.find(pattern) + if idx != -1: + json_start = idx + pattern_used = pattern + break + if json_start != -1: - json_text = content[json_start + 7 :] # Skip past ```json + # Skip past the pattern + json_text = content[json_start + len(pattern_used):] + + # Find the end marker json_end = json_text.find("```") if json_end != -1: - json_text = json_text[:json_end].strip() + json_text = json_text[:json_end] + + # Clean up the text + json_text = json_text.strip() + + # Remove any leading/trailing text that isn't JSON + # Find the first { or [ + first_brace = json_text.find("{") + first_bracket = 
json_text.find("[") + + start_pos = -1 + if first_brace != -1 and (first_bracket == -1 or first_brace < first_bracket): + start_pos = first_brace + elif first_bracket != -1: + start_pos = first_bracket + + if start_pos != -1: + json_text = json_text[start_pos:] + + # Find the last } or ] + last_brace = json_text.rfind("}") + last_bracket = json_text.rfind("]") + + end_pos = -1 + if last_brace != -1 and (last_bracket == -1 or last_brace > last_bracket): + end_pos = last_brace + 1 + elif last_bracket != -1: + end_pos = last_bracket + 1 + + if end_pos != -1: + json_text = json_text[:end_pos] + + if json_text: + return json.loads(json_text) + + # If no markdown blocks found, try to extract JSON directly + first_brace = content.find("{") + if first_brace != -1: + last_brace = content.rfind("}") + if last_brace != -1 and last_brace > first_brace: + json_text = content[first_brace:last_brace + 1] return json.loads(json_text) + except Exception as e: print(f"Error extracting JSON from response: {e}") + print(f"Response content: {repr(content[:500])}") # Show first 500 chars for debugging return None