From f1cbcba9dcf45e5d474a0dc3e8f59e6f6c945787 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 16:14:59 +0000 Subject: [PATCH] SDK regeneration --- poetry.lock | 233 ++++----- reference.md | 138 +++++- src/cohere/__init__.py | 196 ++++---- src/cohere/base_client.py | 32 +- src/cohere/types/__init__.py | 64 ++- src/cohere/types/embed_response.py | 6 +- .../types/generate_streamed_response.py | 10 +- src/cohere/types/message.py | 10 +- src/cohere/types/response_format.py | 6 +- src/cohere/types/streamed_chat_response.py | 32 +- src/cohere/v2/__init__.py | 126 +++-- src/cohere/v2/client.py | 463 ++++++++++++++++-- src/cohere/v2/types/__init__.py | 126 +++-- .../types/assistant_message_content_item.py | 4 +- ...assistant_message_response_content_item.py | 4 +- src/cohere/v2/types/chat_message2.py | 15 +- src/cohere/v2/types/citation_options.py | 28 ++ src/cohere/v2/types/citation_options_mode.py | 5 + src/cohere/v2/types/content.py | 4 +- src/cohere/v2/types/document.py | 33 ++ src/cohere/v2/types/document_content.py | 24 + src/cohere/v2/types/images.py | 50 ++ src/cohere/v2/types/response_format2.py | 6 +- src/cohere/v2/types/source.py | 6 +- .../v2/types/streamed_chat_response2.py | 44 +- .../v2/types/system_message_content_item.py | 6 +- src/cohere/v2/types/texts.py | 62 +++ src/cohere/v2/types/texts_truncate.py | 5 + src/cohere/v2/types/tool_content.py | 51 ++ src/cohere/v2/types/tool_message2.py | 7 +- .../v2/types/tool_message2tool_content.py | 6 + src/cohere/v2/types/user_message.py | 8 +- .../v2/types/v2chat_request_citation_mode.py | 5 - .../v2/types/v2chat_request_documents_item.py | 6 + .../v2chat_stream_request_citation_mode.py | 5 - .../v2chat_stream_request_documents_item.py | 6 + src/cohere/v2/types/v2embed_request.py | 107 ++++ 37 files changed, 1424 insertions(+), 515 deletions(-) create mode 100644 src/cohere/v2/types/citation_options.py create mode 100644 
src/cohere/v2/types/citation_options_mode.py create mode 100644 src/cohere/v2/types/document.py create mode 100644 src/cohere/v2/types/document_content.py create mode 100644 src/cohere/v2/types/images.py create mode 100644 src/cohere/v2/types/texts.py create mode 100644 src/cohere/v2/types/texts_truncate.py create mode 100644 src/cohere/v2/types/tool_content.py create mode 100644 src/cohere/v2/types/tool_message2tool_content.py delete mode 100644 src/cohere/v2/types/v2chat_request_citation_mode.py create mode 100644 src/cohere/v2/types/v2chat_request_documents_item.py delete mode 100644 src/cohere/v2/types/v2chat_stream_request_citation_mode.py create mode 100644 src/cohere/v2/types/v2chat_stream_request_documents_item.py create mode 100644 src/cohere/v2/types/v2embed_request.py diff --git a/poetry.lock b/poetry.lock index 500cafff0..cb6a4db62 100644 --- a/poetry.lock +++ b/poetry.lock @@ -38,17 +38,17 @@ trio = ["trio (>=0.23)"] [[package]] name = "boto3" -version = "1.35.18" +version = "1.35.21" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.35.18-py3-none-any.whl", hash = "sha256:71e237d3997cf93425947854d7b121c577944f391ba633afb0659e1015364704"}, - {file = "boto3-1.35.18.tar.gz", hash = "sha256:fd130308f1f49d748a5fc63de92de79a995b51c79af3947ddde8815fcf0684fe"}, + {file = "boto3-1.35.21-py3-none-any.whl", hash = "sha256:247f88eedce9ae4e014a8fc14a9473759bb8e391460d49396a3b600fb649f33b"}, + {file = "boto3-1.35.21.tar.gz", hash = "sha256:db5fbbd10248db060f2ccce3ae17764f1641c99c8b9f51d422c26ebe25703a1e"}, ] [package.dependencies] -botocore = ">=1.35.18,<1.36.0" +botocore = ">=1.35.21,<1.36.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -57,13 +57,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.35.18" +version = "1.35.21" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.18-py3-none-any.whl", hash = "sha256:1027083aeb1fe74057273410fd768e018e22f85adfbd717b5a69f578f7812b80"}, - {file = "botocore-1.35.18.tar.gz", hash = "sha256:e59da8b91ab06683d2725b6cbbb0383b30c68a241c3c63363f4c5bff59b3c0c0"}, + {file = "botocore-1.35.21-py3-none-any.whl", hash = "sha256:3db9ddfe521edc0753fc8c68caef71c7806e1d2d21ce8cbabc2065b7d79192f2"}, + {file = "botocore-1.35.21.tar.gz", hash = "sha256:db917e7d7b3a2eed1310c6496784bc813c91f020a021c2ab5f9df7d28cdb4f1d"}, ] [package.dependencies] @@ -260,18 +260,18 @@ zstandard = ["zstandard"] [[package]] name = "filelock" -version = "3.16.0" +version = "3.16.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.16.0-py3-none-any.whl", hash = "sha256:f6ed4c963184f4c84dd5557ce8fece759a3724b37b80c6c4f20a2f63a4dc6609"}, - {file = "filelock-3.16.0.tar.gz", hash = "sha256:81de9eb8453c769b63369f87f11131a7ab04e367f8d97ad39dc230daa07e3bec"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.1.1)", "pytest (>=8.3.2)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.3)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] typing = ["typing-extensions (>=4.12.2)"] [[package]] @@ 
-383,13 +383,13 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.24.7" +version = "0.25.0" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.24.7-py3-none-any.whl", hash = "sha256:a212c555324c8a7b1ffdd07266bb7e7d69ca71aa238d27b7842d65e9a26ac3e5"}, - {file = "huggingface_hub-0.24.7.tar.gz", hash = "sha256:0ad8fb756e2831da0ac0491175b960f341fe06ebcf80ed6f8728313f95fc0207"}, + {file = "huggingface_hub-0.25.0-py3-none-any.whl", hash = "sha256:e2f357b35d72d5012cfd127108c4e14abcd61ba4ebc90a5a374dc2456cb34e12"}, + {file = "huggingface_hub-0.25.0.tar.gz", hash = "sha256:fb5fbe6c12fcd99d187ec7db95db9110fb1a20505f23040a5449a717c1a0db4d"}, ] [package.dependencies] @@ -417,15 +417,18 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t [[package]] name = "idna" -version = "3.8" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, - {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "iniconfig" version = "2.0.0" @@ -547,18 +550,18 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.9.1" +version = "2.9.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = 
"pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, - {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.3" +pydantic-core = "2.23.4" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ -570,100 +573,100 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.23.3" +version = "2.23.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, - 
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, - {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, - {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, - {file = 
"pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, - {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, - {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, - {file = 
"pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, - {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, - {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, - {file = 
"pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, - {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, - {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, - {file = 
"pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, - {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, - {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, - {file = 
"pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, - {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, - {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, - {file = 
"pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, - {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + 
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = 
"pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, ] [package.dependencies] @@ -1047,13 +1050,13 @@ types-urllib3 = "*" [[package]] name = "types-requests" -version = "2.32.0.20240907" +version = "2.32.0.20240914" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240907.tar.gz", hash = "sha256:ff33935f061b5e81ec87997e91050f7b4af4f82027a7a7a9d9aaea04a963fdf8"}, - {file = "types_requests-2.32.0.20240907-py3-none-any.whl", hash = "sha256:1d1e79faeaf9d42def77f3c304893dea17a97cae98168ac69f3cb465516ee8da"}, + {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, + {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, ] [package.dependencies] diff --git a/reference.md b/reference.md index 9257bd953..92d0ad3c2 100644 --- a/reference.md +++ b/reference.md @@ -12,7 +12,7 @@
Generates a text response to a user message. -To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). +To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
@@ -28,11 +28,11 @@ To learn how to use the Chat API with Streaming and RAG follow our [Text Generat ```python from cohere import ( + ChatbotMessage, ChatConnector, ChatStreamRequestConnectorsSearchOptions, Client, - Message_Chatbot, - ResponseFormat_Text, + TextResponseFormat, Tool, ToolCall, ToolParameterDefinitionsValue, @@ -48,7 +48,7 @@ response = client.chat_stream( model="string", preamble="string", chat_history=[ - Message_Chatbot( + ChatbotMessage( message="string", tool_calls=[ ToolCall( @@ -108,7 +108,7 @@ response = client.chat_stream( ) ], force_single_step=True, - response_format=ResponseFormat_Text(), + response_format=TextResponseFormat(), safety_mode="CONTEXTUAL", ) for chunk in response: @@ -571,7 +571,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
Generates a text response to a user message. -To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). +To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
@@ -586,7 +586,7 @@ To learn how to use the Chat API with Streaming and RAG follow our [Text Generat
```python -from cohere import Client, Message_Tool +from cohere import Client, ToolMessage client = Client( client_name="YOUR_CLIENT_NAME", @@ -594,7 +594,7 @@ client = Client( ) client.chat( message="Can you give me a global market overview of solar panels?", - chat_history=[Message_Tool(), Message_Tool()], + chat_history=[ToolMessage(), ToolMessage()], prompt_truncation="OFF", temperature=0.3, ) @@ -2336,10 +2336,11 @@ Generates a message from the model in response to a provided conversation. To le ```python from cohere import Client from cohere.v2 import ( - ChatMessage2_User, - ResponseFormat2_Text, + CitationOptions, + TextResponseFormat2, Tool2, Tool2Function, + UserChatMessage2, ) client = Client( @@ -2349,9 +2350,8 @@ client = Client( response = client.v2.chat_stream( model="string", messages=[ - ChatMessage2_User( + UserChatMessage2( content="string", - documents=[{"string": {"key": "value"}}], ) ], tools=[ @@ -2363,8 +2363,11 @@ response = client.v2.chat_stream( ), ) ], - citation_mode="FAST", - response_format=ResponseFormat2_Text(), + documents=["string"], + citation_options=CitationOptions( + mode="FAST", + ), + response_format=TextResponseFormat2(), safety_mode="CONTEXTUAL", max_tokens=1, stop_sequences=["string"], @@ -2422,11 +2425,16 @@ When `tools` is passed (without `tool_results`), the `text` content in the respo
-**citation_mode:** `typing.Optional[V2ChatStreamRequestCitationMode]` +**documents:** `typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. -Defaults to `"accurate"`. -Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + +
+
+
+
+ +**citation_options:** `typing.Optional[CitationOptions]`
@@ -2603,7 +2611,7 @@ Generates a message from the model in response to a provided conversation. To le ```python from cohere import Client -from cohere.v2 import ChatMessage2_Tool +from cohere.v2 import ToolChatMessage2 client = Client( client_name="YOUR_CLIENT_NAME", @@ -2612,9 +2620,9 @@ client = Client( client.v2.chat( model="model", messages=[ - ChatMessage2_Tool( + ToolChatMessage2( tool_call_id="messages", - tool_content=["messages"], + tool_content="messages", ) ], ) @@ -2662,11 +2670,16 @@ When `tools` is passed (without `tool_results`), the `text` content in the respo
-**citation_mode:** `typing.Optional[V2ChatRequestCitationMode]` +**documents:** `typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. -Defaults to `"accurate"`. -Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + +
+
+ +
+
+**citation_options:** `typing.Optional[CitationOptions]`
@@ -2811,6 +2824,85 @@ Defaults to `0.75`. min value of `0.01`, max value of `0.99`. +
+ + + +
client.v2.embed(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. + +Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. + +If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client +from cohere.v2 import ImageV2EmbedRequest + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.v2.embed( + request=ImageV2EmbedRequest( + images=["string"], + model="string", + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `V2EmbedRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ +
diff --git a/src/cohere/__init__.py b/src/cohere/__init__.py index adad8ada0..c8b3bc8eb 100644 --- a/src/cohere/__init__.py +++ b/src/cohere/__init__.py @@ -32,7 +32,9 @@ ChatTextGenerationEvent, ChatToolCallsChunkEvent, ChatToolCallsGenerationEvent, + ChatbotMessage, CheckApiKeyResponse, + CitationGenerationStreamedChatResponse, ClassifyDataMetrics, ClassifyExample, ClassifyRequestTruncate, @@ -64,9 +66,9 @@ EmbedJobTruncate, EmbedRequestTruncate, EmbedResponse, - EmbedResponse_EmbeddingsByType, - EmbedResponse_EmbeddingsFloats, EmbeddingType, + EmbeddingsByTypeEmbedResponse, + EmbeddingsFloatsEmbedResponse, FinetuneDatasetMetrics, FinishReason, GatewayTimeoutErrorBody, @@ -80,22 +82,16 @@ GenerateStreamRequestTruncate, GenerateStreamText, GenerateStreamedResponse, - GenerateStreamedResponse_StreamEnd, - GenerateStreamedResponse_StreamError, - GenerateStreamedResponse_TextGeneration, Generation, GetConnectorResponse, GetModelResponse, + JsonObjectResponseFormat, JsonResponseFormat, LabelMetric, ListConnectorsResponse, ListEmbedJobResponse, ListModelsResponse, Message, - Message_Chatbot, - Message_System, - Message_Tool, - Message_User, Metrics, MetricsEmbedData, MetricsEmbedDataFieldsItem, @@ -110,35 +106,37 @@ RerankResponseResultsItemDocument, RerankerDataMetrics, ResponseFormat, - ResponseFormat_JsonObject, - ResponseFormat_Text, + SearchQueriesGenerationStreamedChatResponse, + SearchResultsStreamedChatResponse, SingleGeneration, SingleGenerationInStream, SingleGenerationTokenLikelihoodsItem, + StreamEndGenerateStreamedResponse, + StreamEndStreamedChatResponse, + StreamErrorGenerateStreamedResponse, + StreamStartStreamedChatResponse, StreamedChatResponse, - StreamedChatResponse_CitationGeneration, - StreamedChatResponse_SearchQueriesGeneration, - StreamedChatResponse_SearchResults, - StreamedChatResponse_StreamEnd, - StreamedChatResponse_StreamStart, - StreamedChatResponse_TextGeneration, - StreamedChatResponse_ToolCallsChunk, - 
StreamedChatResponse_ToolCallsGeneration, SummarizeRequestExtractiveness, SummarizeRequestFormat, SummarizeRequestLength, SummarizeResponse, + SystemMessage, + TextGenerationGenerateStreamedResponse, + TextGenerationStreamedChatResponse, TextResponseFormat, TokenizeResponse, TooManyRequestsErrorBody, Tool, ToolCall, ToolCallDelta, + ToolCallsChunkStreamedChatResponse, + ToolCallsGenerationStreamedChatResponse, ToolMessage, ToolParameterDefinitionsValue, ToolResult, UnprocessableEntityErrorBody, UpdateConnectorResponse, + UserMessage, ) from .errors import ( BadRequestError, @@ -169,13 +167,12 @@ from .environment import ClientEnvironment from .sagemaker_client import SagemakerClient from .v2 import ( + AssistantChatMessage2, AssistantMessage, AssistantMessageContent, AssistantMessageContentItem, - AssistantMessageContentItem_Text, AssistantMessageResponse, AssistantMessageResponseContentItem, - AssistantMessageResponseContentItem_Text, ChatContentDeltaEvent, ChatContentDeltaEventDelta, ChatContentDeltaEventDeltaMessage, @@ -187,10 +184,6 @@ ChatContentStartEventDeltaMessageContent, ChatFinishReason, ChatMessage2, - ChatMessage2_Assistant, - ChatMessage2_System, - ChatMessage2_Tool, - ChatMessage2_User, ChatMessageEndEvent, ChatMessageEndEventDelta, ChatMessageStartEvent, @@ -211,53 +204,71 @@ ChatToolPlanDeltaEventDelta, Citation, CitationEndEvent, + CitationEndStreamedChatResponse2, + CitationOptions, + CitationOptionsMode, CitationStartEvent, CitationStartEventDelta, CitationStartEventDeltaMessage, + CitationStartStreamedChatResponse2, + ClassificationV2EmbedRequest, + ClusteringV2EmbedRequest, Content, - Content_Text, + ContentDeltaStreamedChatResponse2, + ContentEndStreamedChatResponse2, + ContentStartStreamedChatResponse2, + Document, + DocumentContent, DocumentSource, + DocumentToolContent, + ImageV2EmbedRequest, + Images, + JsonObjectResponseFormat2, JsonResponseFormat2, + MessageEndStreamedChatResponse2, + MessageStartStreamedChatResponse2, 
NonStreamedChatResponse2, ResponseFormat2, - ResponseFormat2_JsonObject, - ResponseFormat2_Text, + SearchDocumentV2EmbedRequest, + SearchQueryV2EmbedRequest, Source, - Source_Document, - Source_Tool, StreamedChatResponse2, - StreamedChatResponse2_CitationEnd, - StreamedChatResponse2_CitationStart, - StreamedChatResponse2_ContentDelta, - StreamedChatResponse2_ContentEnd, - StreamedChatResponse2_ContentStart, - StreamedChatResponse2_MessageEnd, - StreamedChatResponse2_MessageStart, - StreamedChatResponse2_ToolCallDelta, - StreamedChatResponse2_ToolCallEnd, - StreamedChatResponse2_ToolCallStart, - StreamedChatResponse2_ToolPlanDelta, + SystemChatMessage2, SystemMessage, SystemMessageContent, SystemMessageContentItem, - SystemMessageContentItem_Text, + TextAssistantMessageContentItem, + TextAssistantMessageResponseContentItem, TextContent, TextResponseFormat2, + TextSystemMessageContentItem, + TextToolContent, + Texts, + TextsTruncate, Tool2, Tool2Function, ToolCall2, ToolCall2Function, + ToolCallDeltaStreamedChatResponse2, + ToolCallEndStreamedChatResponse2, + ToolCallStartStreamedChatResponse2, + ToolChatMessage2, + ToolContent, ToolMessage2, + ToolMessage2ToolContent, + ToolPlanDeltaStreamedChatResponse2, ToolSource, Usage, UsageBilledUnits, UsageTokens, + UserChatMessage2, UserMessage, UserMessageContent, - V2ChatRequestCitationMode, + V2ChatRequestDocumentsItem, V2ChatRequestSafetyMode, - V2ChatStreamRequestCitationMode, + V2ChatStreamRequestDocumentsItem, V2ChatStreamRequestSafetyMode, + V2EmbedRequest, ) from .version import __version__ @@ -266,13 +277,12 @@ "ApiMetaApiVersion", "ApiMetaBilledUnits", "ApiMetaTokens", + "AssistantChatMessage2", "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentItem", - "AssistantMessageContentItem_Text", "AssistantMessageResponse", "AssistantMessageResponseContentItem", - "AssistantMessageResponseContentItem_Text", "AsyncClient", "AsyncClientV2", "AuthTokenType", @@ -296,10 +306,6 @@ "ChatFinishReason", 
"ChatMessage", "ChatMessage2", - "ChatMessage2_Assistant", - "ChatMessage2_System", - "ChatMessage2_Tool", - "ChatMessage2_User", "ChatMessageEndEvent", "ChatMessageEndEventDelta", "ChatMessageStartEvent", @@ -338,12 +344,19 @@ "ChatToolCallsGenerationEvent", "ChatToolPlanDeltaEvent", "ChatToolPlanDeltaEventDelta", + "ChatbotMessage", "CheckApiKeyResponse", "Citation", "CitationEndEvent", + "CitationEndStreamedChatResponse2", + "CitationGenerationStreamedChatResponse", + "CitationOptions", + "CitationOptionsMode", "CitationStartEvent", "CitationStartEventDelta", "CitationStartEventDeltaMessage", + "CitationStartStreamedChatResponse2", + "ClassificationV2EmbedRequest", "ClassifyDataMetrics", "ClassifyExample", "ClassifyRequestTruncate", @@ -356,12 +369,15 @@ "ClientClosedRequestErrorBody", "ClientEnvironment", "ClientV2", + "ClusteringV2EmbedRequest", "CompatibleEndpoint", "Connector", "ConnectorAuthStatus", "ConnectorOAuth", "Content", - "Content_Text", + "ContentDeltaStreamedChatResponse2", + "ContentEndStreamedChatResponse2", + "ContentStartStreamedChatResponse2", "CreateConnectorOAuth", "CreateConnectorResponse", "CreateConnectorServiceAuth", @@ -378,7 +394,10 @@ "DatasetsListResponse", "DeleteConnectorResponse", "DetokenizeResponse", + "Document", + "DocumentContent", "DocumentSource", + "DocumentToolContent", "EmbedByTypeResponse", "EmbedByTypeResponseEmbeddings", "EmbedFloatsResponse", @@ -388,9 +407,9 @@ "EmbedJobTruncate", "EmbedRequestTruncate", "EmbedResponse", - "EmbedResponse_EmbeddingsByType", - "EmbedResponse_EmbeddingsFloats", "EmbeddingType", + "EmbeddingsByTypeEmbedResponse", + "EmbeddingsFloatsEmbedResponse", "FinetuneDatasetMetrics", "FinishReason", "ForbiddenError", @@ -406,13 +425,14 @@ "GenerateStreamRequestTruncate", "GenerateStreamText", "GenerateStreamedResponse", - "GenerateStreamedResponse_StreamEnd", - "GenerateStreamedResponse_StreamError", - "GenerateStreamedResponse_TextGeneration", "Generation", "GetConnectorResponse", 
"GetModelResponse", + "ImageV2EmbedRequest", + "Images", "InternalServerError", + "JsonObjectResponseFormat", + "JsonObjectResponseFormat2", "JsonResponseFormat", "JsonResponseFormat2", "LabelMetric", @@ -420,10 +440,8 @@ "ListEmbedJobResponse", "ListModelsResponse", "Message", - "Message_Chatbot", - "Message_System", - "Message_Tool", - "Message_User", + "MessageEndStreamedChatResponse2", + "MessageStartStreamedChatResponse2", "Metrics", "MetricsEmbedData", "MetricsEmbedDataFieldsItem", @@ -442,50 +460,41 @@ "RerankerDataMetrics", "ResponseFormat", "ResponseFormat2", - "ResponseFormat2_JsonObject", - "ResponseFormat2_Text", - "ResponseFormat_JsonObject", - "ResponseFormat_Text", "SagemakerClient", + "SearchDocumentV2EmbedRequest", + "SearchQueriesGenerationStreamedChatResponse", + "SearchQueryV2EmbedRequest", + "SearchResultsStreamedChatResponse", "ServiceUnavailableError", "SingleGeneration", "SingleGenerationInStream", "SingleGenerationTokenLikelihoodsItem", "Source", - "Source_Document", - "Source_Tool", + "StreamEndGenerateStreamedResponse", + "StreamEndStreamedChatResponse", + "StreamErrorGenerateStreamedResponse", + "StreamStartStreamedChatResponse", "StreamedChatResponse", "StreamedChatResponse2", - "StreamedChatResponse2_CitationEnd", - "StreamedChatResponse2_CitationStart", - "StreamedChatResponse2_ContentDelta", - "StreamedChatResponse2_ContentEnd", - "StreamedChatResponse2_ContentStart", - "StreamedChatResponse2_MessageEnd", - "StreamedChatResponse2_MessageStart", - "StreamedChatResponse2_ToolCallDelta", - "StreamedChatResponse2_ToolCallEnd", - "StreamedChatResponse2_ToolCallStart", - "StreamedChatResponse2_ToolPlanDelta", - "StreamedChatResponse_CitationGeneration", - "StreamedChatResponse_SearchQueriesGeneration", - "StreamedChatResponse_SearchResults", - "StreamedChatResponse_StreamEnd", - "StreamedChatResponse_StreamStart", - "StreamedChatResponse_TextGeneration", - "StreamedChatResponse_ToolCallsChunk", - "StreamedChatResponse_ToolCallsGeneration", 
"SummarizeRequestExtractiveness", "SummarizeRequestFormat", "SummarizeRequestLength", "SummarizeResponse", + "SystemChatMessage2", "SystemMessage", "SystemMessageContent", "SystemMessageContentItem", - "SystemMessageContentItem_Text", + "TextAssistantMessageContentItem", + "TextAssistantMessageResponseContentItem", "TextContent", + "TextGenerationGenerateStreamedResponse", + "TextGenerationStreamedChatResponse", "TextResponseFormat", "TextResponseFormat2", + "TextSystemMessageContentItem", + "TextToolContent", + "Texts", + "TextsTruncate", "TokenizeResponse", "TooManyRequestsError", "TooManyRequestsErrorBody", @@ -496,9 +505,18 @@ "ToolCall2", "ToolCall2Function", "ToolCallDelta", + "ToolCallDeltaStreamedChatResponse2", + "ToolCallEndStreamedChatResponse2", + "ToolCallStartStreamedChatResponse2", + "ToolCallsChunkStreamedChatResponse", + "ToolCallsGenerationStreamedChatResponse", + "ToolChatMessage2", + "ToolContent", "ToolMessage", "ToolMessage2", + "ToolMessage2ToolContent", "ToolParameterDefinitionsValue", + "ToolPlanDeltaStreamedChatResponse2", "ToolResult", "ToolSource", "UnauthorizedError", @@ -508,12 +526,14 @@ "Usage", "UsageBilledUnits", "UsageTokens", + "UserChatMessage2", "UserMessage", "UserMessageContent", - "V2ChatRequestCitationMode", + "V2ChatRequestDocumentsItem", "V2ChatRequestSafetyMode", - "V2ChatStreamRequestCitationMode", + "V2ChatStreamRequestDocumentsItem", "V2ChatStreamRequestSafetyMode", + "V2EmbedRequest", "__version__", "connectors", "datasets", diff --git a/src/cohere/base_client.py b/src/cohere/base_client.py index 910b0dafb..24039b71c 100644 --- a/src/cohere/base_client.py +++ b/src/cohere/base_client.py @@ -186,7 +186,7 @@ def chat_stream( ) -> typing.Iterator[StreamedChatResponse]: """ Generates a text response to a user message. - To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). 
+ To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). Parameters ---------- @@ -437,11 +437,11 @@ def chat_stream( Examples -------- from cohere import ( + ChatbotMessage, ChatConnector, ChatStreamRequestConnectorsSearchOptions, Client, - Message_Chatbot, - ResponseFormat_Text, + TextResponseFormat, Tool, ToolCall, ToolParameterDefinitionsValue, @@ -457,7 +457,7 @@ def chat_stream( model="string", preamble="string", chat_history=[ - Message_Chatbot( + ChatbotMessage( message="string", tool_calls=[ ToolCall( @@ -517,7 +517,7 @@ def chat_stream( ) ], force_single_step=True, - response_format=ResponseFormat_Text(), + response_format=TextResponseFormat(), safety_mode="CONTEXTUAL", ) for chunk in response: @@ -737,7 +737,7 @@ def chat( ) -> NonStreamedChatResponse: """ Generates a text response to a user message. - To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). + To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). Parameters ---------- @@ -987,7 +987,7 @@ def chat( Examples -------- - from cohere import Client, Message_Tool + from cohere import Client, ToolMessage client = Client( client_name="YOUR_CLIENT_NAME", @@ -995,7 +995,7 @@ def chat( ) client.chat( message="Can you give me a global market overview of solar panels?", - chat_history=[Message_Tool(), Message_Tool()], + chat_history=[ToolMessage(), ToolMessage()], prompt_truncation="OFF", temperature=0.3, ) @@ -3181,7 +3181,7 @@ async def chat_stream( ) -> typing.AsyncIterator[StreamedChatResponse]: """ Generates a text response to a user message. - To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). + To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). 
Parameters ---------- @@ -3435,10 +3435,10 @@ async def chat_stream( from cohere import ( AsyncClient, + ChatbotMessage, ChatConnector, ChatStreamRequestConnectorsSearchOptions, - Message_Chatbot, - ResponseFormat_Text, + TextResponseFormat, Tool, ToolCall, ToolParameterDefinitionsValue, @@ -3457,7 +3457,7 @@ async def main() -> None: model="string", preamble="string", chat_history=[ - Message_Chatbot( + ChatbotMessage( message="string", tool_calls=[ ToolCall( @@ -3517,7 +3517,7 @@ async def main() -> None: ) ], force_single_step=True, - response_format=ResponseFormat_Text(), + response_format=TextResponseFormat(), safety_mode="CONTEXTUAL", ) async for chunk in response: @@ -3740,7 +3740,7 @@ async def chat( ) -> NonStreamedChatResponse: """ Generates a text response to a user message. - To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). + To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). 
Parameters ---------- @@ -3992,7 +3992,7 @@ async def chat( -------- import asyncio - from cohere import AsyncClient, Message_Tool + from cohere import AsyncClient, ToolMessage client = AsyncClient( client_name="YOUR_CLIENT_NAME", @@ -4003,7 +4003,7 @@ async def chat( async def main() -> None: await client.chat( message="Can you give me a global market overview of solar panels?", - chat_history=[Message_Tool(), Message_Tool()], + chat_history=[ToolMessage(), ToolMessage()], prompt_truncation="OFF", temperature=0.3, ) diff --git a/src/cohere/types/__init__.py b/src/cohere/types/__init__.py index ea6d61264..baccf436c 100644 --- a/src/cohere/types/__init__.py +++ b/src/cohere/types/__init__.py @@ -64,7 +64,7 @@ from .embed_job_status import EmbedJobStatus from .embed_job_truncate import EmbedJobTruncate from .embed_request_truncate import EmbedRequestTruncate -from .embed_response import EmbedResponse, EmbedResponse_EmbeddingsByType, EmbedResponse_EmbeddingsFloats +from .embed_response import EmbedResponse, EmbeddingsByTypeEmbedResponse, EmbeddingsFloatsEmbedResponse from .embedding_type import EmbeddingType from .finetune_dataset_metrics import FinetuneDatasetMetrics from .finish_reason import FinishReason @@ -80,9 +80,9 @@ from .generate_stream_text import GenerateStreamText from .generate_streamed_response import ( GenerateStreamedResponse, - GenerateStreamedResponse_StreamEnd, - GenerateStreamedResponse_StreamError, - GenerateStreamedResponse_TextGeneration, + StreamEndGenerateStreamedResponse, + StreamErrorGenerateStreamedResponse, + TextGenerationGenerateStreamedResponse, ) from .generation import Generation from .get_connector_response import GetConnectorResponse @@ -92,7 +92,7 @@ from .list_connectors_response import ListConnectorsResponse from .list_embed_job_response import ListEmbedJobResponse from .list_models_response import ListModelsResponse -from .message import Message, Message_Chatbot, Message_System, Message_Tool, Message_User +from .message import 
ChatbotMessage, Message, SystemMessage, ToolMessage, UserMessage from .metrics import Metrics from .metrics_embed_data import MetricsEmbedData from .metrics_embed_data_fields_item import MetricsEmbedDataFieldsItem @@ -106,20 +106,20 @@ from .rerank_response_results_item import RerankResponseResultsItem from .rerank_response_results_item_document import RerankResponseResultsItemDocument from .reranker_data_metrics import RerankerDataMetrics -from .response_format import ResponseFormat, ResponseFormat_JsonObject, ResponseFormat_Text +from .response_format import JsonObjectResponseFormat, ResponseFormat, TextResponseFormat from .single_generation import SingleGeneration from .single_generation_in_stream import SingleGenerationInStream from .single_generation_token_likelihoods_item import SingleGenerationTokenLikelihoodsItem from .streamed_chat_response import ( + CitationGenerationStreamedChatResponse, + SearchQueriesGenerationStreamedChatResponse, + SearchResultsStreamedChatResponse, + StreamEndStreamedChatResponse, + StreamStartStreamedChatResponse, StreamedChatResponse, - StreamedChatResponse_CitationGeneration, - StreamedChatResponse_SearchQueriesGeneration, - StreamedChatResponse_SearchResults, - StreamedChatResponse_StreamEnd, - StreamedChatResponse_StreamStart, - StreamedChatResponse_TextGeneration, - StreamedChatResponse_ToolCallsChunk, - StreamedChatResponse_ToolCallsGeneration, + TextGenerationStreamedChatResponse, + ToolCallsChunkStreamedChatResponse, + ToolCallsGenerationStreamedChatResponse, ) from .summarize_request_extractiveness import SummarizeRequestExtractiveness from .summarize_request_format import SummarizeRequestFormat @@ -169,7 +169,9 @@ "ChatTextGenerationEvent", "ChatToolCallsChunkEvent", "ChatToolCallsGenerationEvent", + "ChatbotMessage", "CheckApiKeyResponse", + "CitationGenerationStreamedChatResponse", "ClassifyDataMetrics", "ClassifyExample", "ClassifyRequestTruncate", @@ -201,9 +203,9 @@ "EmbedJobTruncate", "EmbedRequestTruncate", 
"EmbedResponse", - "EmbedResponse_EmbeddingsByType", - "EmbedResponse_EmbeddingsFloats", "EmbeddingType", + "EmbeddingsByTypeEmbedResponse", + "EmbeddingsFloatsEmbedResponse", "FinetuneDatasetMetrics", "FinishReason", "GatewayTimeoutErrorBody", @@ -217,22 +219,16 @@ "GenerateStreamRequestTruncate", "GenerateStreamText", "GenerateStreamedResponse", - "GenerateStreamedResponse_StreamEnd", - "GenerateStreamedResponse_StreamError", - "GenerateStreamedResponse_TextGeneration", "Generation", "GetConnectorResponse", "GetModelResponse", + "JsonObjectResponseFormat", "JsonResponseFormat", "LabelMetric", "ListConnectorsResponse", "ListEmbedJobResponse", "ListModelsResponse", "Message", - "Message_Chatbot", - "Message_System", - "Message_Tool", - "Message_User", "Metrics", "MetricsEmbedData", "MetricsEmbedDataFieldsItem", @@ -247,33 +243,35 @@ "RerankResponseResultsItemDocument", "RerankerDataMetrics", "ResponseFormat", - "ResponseFormat_JsonObject", - "ResponseFormat_Text", + "SearchQueriesGenerationStreamedChatResponse", + "SearchResultsStreamedChatResponse", "SingleGeneration", "SingleGenerationInStream", "SingleGenerationTokenLikelihoodsItem", + "StreamEndGenerateStreamedResponse", + "StreamEndStreamedChatResponse", + "StreamErrorGenerateStreamedResponse", + "StreamStartStreamedChatResponse", "StreamedChatResponse", - "StreamedChatResponse_CitationGeneration", - "StreamedChatResponse_SearchQueriesGeneration", - "StreamedChatResponse_SearchResults", - "StreamedChatResponse_StreamEnd", - "StreamedChatResponse_StreamStart", - "StreamedChatResponse_TextGeneration", - "StreamedChatResponse_ToolCallsChunk", - "StreamedChatResponse_ToolCallsGeneration", "SummarizeRequestExtractiveness", "SummarizeRequestFormat", "SummarizeRequestLength", "SummarizeResponse", + "SystemMessage", + "TextGenerationGenerateStreamedResponse", + "TextGenerationStreamedChatResponse", "TextResponseFormat", "TokenizeResponse", "TooManyRequestsErrorBody", "Tool", "ToolCall", "ToolCallDelta", + 
"ToolCallsChunkStreamedChatResponse", + "ToolCallsGenerationStreamedChatResponse", "ToolMessage", "ToolParameterDefinitionsValue", "ToolResult", "UnprocessableEntityErrorBody", "UpdateConnectorResponse", + "UserMessage", ] diff --git a/src/cohere/types/embed_response.py b/src/cohere/types/embed_response.py index d1d6083f1..d6e142daf 100644 --- a/src/cohere/types/embed_response.py +++ b/src/cohere/types/embed_response.py @@ -11,7 +11,7 @@ from ..core.unchecked_base_model import UnionMetadata -class EmbedResponse_EmbeddingsFloats(UncheckedBaseModel): +class EmbeddingsFloatsEmbedResponse(UncheckedBaseModel): response_type: typing.Literal["embeddings_floats"] = "embeddings_floats" id: str embeddings: typing.List[typing.List[float]] @@ -28,7 +28,7 @@ class Config: extra = pydantic.Extra.allow -class EmbedResponse_EmbeddingsByType(UncheckedBaseModel): +class EmbeddingsByTypeEmbedResponse(UncheckedBaseModel): response_type: typing.Literal["embeddings_by_type"] = "embeddings_by_type" id: str embeddings: EmbedByTypeResponseEmbeddings @@ -46,6 +46,6 @@ class Config: EmbedResponse = typing_extensions.Annotated[ - typing.Union[EmbedResponse_EmbeddingsFloats, EmbedResponse_EmbeddingsByType], + typing.Union[EmbeddingsFloatsEmbedResponse, EmbeddingsByTypeEmbedResponse], UnionMetadata(discriminant="response_type"), ] diff --git a/src/cohere/types/generate_streamed_response.py b/src/cohere/types/generate_streamed_response.py index d6b74f4bc..a7694cbbe 100644 --- a/src/cohere/types/generate_streamed_response.py +++ b/src/cohere/types/generate_streamed_response.py @@ -11,7 +11,7 @@ from ..core.unchecked_base_model import UnionMetadata -class GenerateStreamedResponse_TextGeneration(UncheckedBaseModel): +class TextGenerationGenerateStreamedResponse(UncheckedBaseModel): """ Response in content type stream when `stream` is `true` in the request parameters. Generation tokens are streamed with the GenerationStream response. The final response is of type GenerationFinalResponse. 
""" @@ -31,7 +31,7 @@ class Config: extra = pydantic.Extra.allow -class GenerateStreamedResponse_StreamEnd(UncheckedBaseModel): +class StreamEndGenerateStreamedResponse(UncheckedBaseModel): """ Response in content type stream when `stream` is `true` in the request parameters. Generation tokens are streamed with the GenerationStream response. The final response is of type GenerationFinalResponse. """ @@ -51,7 +51,7 @@ class Config: extra = pydantic.Extra.allow -class GenerateStreamedResponse_StreamError(UncheckedBaseModel): +class StreamErrorGenerateStreamedResponse(UncheckedBaseModel): """ Response in content type stream when `stream` is `true` in the request parameters. Generation tokens are streamed with the GenerationStream response. The final response is of type GenerationFinalResponse. """ @@ -74,9 +74,7 @@ class Config: GenerateStreamedResponse = typing_extensions.Annotated[ typing.Union[ - GenerateStreamedResponse_TextGeneration, - GenerateStreamedResponse_StreamEnd, - GenerateStreamedResponse_StreamError, + TextGenerationGenerateStreamedResponse, StreamEndGenerateStreamedResponse, StreamErrorGenerateStreamedResponse ], UnionMetadata(discriminant="event_type"), ] diff --git a/src/cohere/types/message.py b/src/cohere/types/message.py index d4edc57e8..cdb690afb 100644 --- a/src/cohere/types/message.py +++ b/src/cohere/types/message.py @@ -11,7 +11,7 @@ from ..core.unchecked_base_model import UnionMetadata -class Message_Chatbot(UncheckedBaseModel): +class ChatbotMessage(UncheckedBaseModel): role: typing.Literal["CHATBOT"] = "CHATBOT" message: str tool_calls: typing.Optional[typing.List[ToolCall]] = None @@ -26,7 +26,7 @@ class Config: extra = pydantic.Extra.allow -class Message_System(UncheckedBaseModel): +class SystemMessage(UncheckedBaseModel): role: typing.Literal["SYSTEM"] = "SYSTEM" message: str tool_calls: typing.Optional[typing.List[ToolCall]] = None @@ -41,7 +41,7 @@ class Config: extra = pydantic.Extra.allow -class Message_User(UncheckedBaseModel): 
+class UserMessage(UncheckedBaseModel): role: typing.Literal["USER"] = "USER" message: str tool_calls: typing.Optional[typing.List[ToolCall]] = None @@ -56,7 +56,7 @@ class Config: extra = pydantic.Extra.allow -class Message_Tool(UncheckedBaseModel): +class ToolMessage(UncheckedBaseModel): role: typing.Literal["TOOL"] = "TOOL" tool_results: typing.Optional[typing.List[ToolResult]] = None @@ -71,5 +71,5 @@ class Config: Message = typing_extensions.Annotated[ - typing.Union[Message_Chatbot, Message_System, Message_User, Message_Tool], UnionMetadata(discriminant="role") + typing.Union[ChatbotMessage, SystemMessage, UserMessage, ToolMessage], UnionMetadata(discriminant="role") ] diff --git a/src/cohere/types/response_format.py b/src/cohere/types/response_format.py index 458244ff5..dae43927f 100644 --- a/src/cohere/types/response_format.py +++ b/src/cohere/types/response_format.py @@ -10,7 +10,7 @@ from ..core.unchecked_base_model import UnionMetadata -class ResponseFormat_Text(UncheckedBaseModel): +class TextResponseFormat(UncheckedBaseModel): """ Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models. @@ -34,7 +34,7 @@ class Config: extra = pydantic.Extra.allow -class ResponseFormat_JsonObject(UncheckedBaseModel): +class JsonObjectResponseFormat(UncheckedBaseModel): """ Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models. 
@@ -62,5 +62,5 @@ class Config: ResponseFormat = typing_extensions.Annotated[ - typing.Union[ResponseFormat_Text, ResponseFormat_JsonObject], UnionMetadata(discriminant="type") + typing.Union[TextResponseFormat, JsonObjectResponseFormat], UnionMetadata(discriminant="type") ] diff --git a/src/cohere/types/streamed_chat_response.py b/src/cohere/types/streamed_chat_response.py index 0576316a8..32e525970 100644 --- a/src/cohere/types/streamed_chat_response.py +++ b/src/cohere/types/streamed_chat_response.py @@ -17,7 +17,7 @@ from ..core.unchecked_base_model import UnionMetadata -class StreamedChatResponse_StreamStart(UncheckedBaseModel): +class StreamStartStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -35,7 +35,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_SearchQueriesGeneration(UncheckedBaseModel): +class SearchQueriesGenerationStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -53,7 +53,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_SearchResults(UncheckedBaseModel): +class SearchResultsStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -72,7 +72,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_TextGeneration(UncheckedBaseModel): +class TextGenerationStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). 
""" @@ -90,7 +90,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_CitationGeneration(UncheckedBaseModel): +class CitationGenerationStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -108,7 +108,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_ToolCallsGeneration(UncheckedBaseModel): +class ToolCallsGenerationStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -127,7 +127,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_StreamEnd(UncheckedBaseModel): +class StreamEndStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -146,7 +146,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_ToolCallsChunk(UncheckedBaseModel): +class ToolCallsChunkStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). 
""" @@ -166,14 +166,14 @@ class Config: StreamedChatResponse = typing_extensions.Annotated[ typing.Union[ - StreamedChatResponse_StreamStart, - StreamedChatResponse_SearchQueriesGeneration, - StreamedChatResponse_SearchResults, - StreamedChatResponse_TextGeneration, - StreamedChatResponse_CitationGeneration, - StreamedChatResponse_ToolCallsGeneration, - StreamedChatResponse_StreamEnd, - StreamedChatResponse_ToolCallsChunk, + StreamStartStreamedChatResponse, + SearchQueriesGenerationStreamedChatResponse, + SearchResultsStreamedChatResponse, + TextGenerationStreamedChatResponse, + CitationGenerationStreamedChatResponse, + ToolCallsGenerationStreamedChatResponse, + StreamEndStreamedChatResponse, + ToolCallsChunkStreamedChatResponse, ], UnionMetadata(discriminant="event_type"), ] diff --git a/src/cohere/v2/__init__.py b/src/cohere/v2/__init__.py index 922af5e29..d284db3cb 100644 --- a/src/cohere/v2/__init__.py +++ b/src/cohere/v2/__init__.py @@ -1,13 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
from .types import ( + AssistantChatMessage2, AssistantMessage, AssistantMessageContent, AssistantMessageContentItem, - AssistantMessageContentItem_Text, AssistantMessageResponse, AssistantMessageResponseContentItem, - AssistantMessageResponseContentItem_Text, ChatContentDeltaEvent, ChatContentDeltaEventDelta, ChatContentDeltaEventDeltaMessage, @@ -19,10 +18,6 @@ ChatContentStartEventDeltaMessageContent, ChatFinishReason, ChatMessage2, - ChatMessage2_Assistant, - ChatMessage2_System, - ChatMessage2_Tool, - ChatMessage2_User, ChatMessageEndEvent, ChatMessageEndEventDelta, ChatMessageStartEvent, @@ -43,63 +38,80 @@ ChatToolPlanDeltaEventDelta, Citation, CitationEndEvent, + CitationEndStreamedChatResponse2, + CitationOptions, + CitationOptionsMode, CitationStartEvent, CitationStartEventDelta, CitationStartEventDeltaMessage, + CitationStartStreamedChatResponse2, + ClassificationV2EmbedRequest, + ClusteringV2EmbedRequest, Content, - Content_Text, + ContentDeltaStreamedChatResponse2, + ContentEndStreamedChatResponse2, + ContentStartStreamedChatResponse2, + Document, + DocumentContent, DocumentSource, + DocumentToolContent, + ImageV2EmbedRequest, + Images, + JsonObjectResponseFormat2, JsonResponseFormat2, + MessageEndStreamedChatResponse2, + MessageStartStreamedChatResponse2, NonStreamedChatResponse2, ResponseFormat2, - ResponseFormat2_JsonObject, - ResponseFormat2_Text, + SearchDocumentV2EmbedRequest, + SearchQueryV2EmbedRequest, Source, - Source_Document, - Source_Tool, StreamedChatResponse2, - StreamedChatResponse2_CitationEnd, - StreamedChatResponse2_CitationStart, - StreamedChatResponse2_ContentDelta, - StreamedChatResponse2_ContentEnd, - StreamedChatResponse2_ContentStart, - StreamedChatResponse2_MessageEnd, - StreamedChatResponse2_MessageStart, - StreamedChatResponse2_ToolCallDelta, - StreamedChatResponse2_ToolCallEnd, - StreamedChatResponse2_ToolCallStart, - StreamedChatResponse2_ToolPlanDelta, + SystemChatMessage2, SystemMessage, SystemMessageContent, 
SystemMessageContentItem, - SystemMessageContentItem_Text, + TextAssistantMessageContentItem, + TextAssistantMessageResponseContentItem, TextContent, TextResponseFormat2, + TextSystemMessageContentItem, + TextToolContent, + Texts, + TextsTruncate, Tool2, Tool2Function, ToolCall2, ToolCall2Function, + ToolCallDeltaStreamedChatResponse2, + ToolCallEndStreamedChatResponse2, + ToolCallStartStreamedChatResponse2, + ToolChatMessage2, + ToolContent, ToolMessage2, + ToolMessage2ToolContent, + ToolPlanDeltaStreamedChatResponse2, ToolSource, Usage, UsageBilledUnits, UsageTokens, + UserChatMessage2, UserMessage, UserMessageContent, - V2ChatRequestCitationMode, + V2ChatRequestDocumentsItem, V2ChatRequestSafetyMode, - V2ChatStreamRequestCitationMode, + V2ChatStreamRequestDocumentsItem, V2ChatStreamRequestSafetyMode, + V2EmbedRequest, ) __all__ = [ + "AssistantChatMessage2", "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentItem", - "AssistantMessageContentItem_Text", "AssistantMessageResponse", "AssistantMessageResponseContentItem", - "AssistantMessageResponseContentItem_Text", "ChatContentDeltaEvent", "ChatContentDeltaEventDelta", "ChatContentDeltaEventDeltaMessage", @@ -111,10 +123,6 @@ "ChatContentStartEventDeltaMessageContent", "ChatFinishReason", "ChatMessage2", - "ChatMessage2_Assistant", - "ChatMessage2_System", - "ChatMessage2_Tool", - "ChatMessage2_User", "ChatMessageEndEvent", "ChatMessageEndEventDelta", "ChatMessageStartEvent", @@ -135,51 +143,69 @@ "ChatToolPlanDeltaEventDelta", "Citation", "CitationEndEvent", + "CitationEndStreamedChatResponse2", + "CitationOptions", + "CitationOptionsMode", "CitationStartEvent", "CitationStartEventDelta", "CitationStartEventDeltaMessage", + "CitationStartStreamedChatResponse2", + "ClassificationV2EmbedRequest", + "ClusteringV2EmbedRequest", "Content", - "Content_Text", + "ContentDeltaStreamedChatResponse2", + "ContentEndStreamedChatResponse2", + "ContentStartStreamedChatResponse2", + "Document", + 
"DocumentContent", "DocumentSource", + "DocumentToolContent", + "ImageV2EmbedRequest", + "Images", + "JsonObjectResponseFormat2", "JsonResponseFormat2", + "MessageEndStreamedChatResponse2", + "MessageStartStreamedChatResponse2", "NonStreamedChatResponse2", "ResponseFormat2", - "ResponseFormat2_JsonObject", - "ResponseFormat2_Text", + "SearchDocumentV2EmbedRequest", + "SearchQueryV2EmbedRequest", "Source", - "Source_Document", - "Source_Tool", "StreamedChatResponse2", - "StreamedChatResponse2_CitationEnd", - "StreamedChatResponse2_CitationStart", - "StreamedChatResponse2_ContentDelta", - "StreamedChatResponse2_ContentEnd", - "StreamedChatResponse2_ContentStart", - "StreamedChatResponse2_MessageEnd", - "StreamedChatResponse2_MessageStart", - "StreamedChatResponse2_ToolCallDelta", - "StreamedChatResponse2_ToolCallEnd", - "StreamedChatResponse2_ToolCallStart", - "StreamedChatResponse2_ToolPlanDelta", + "SystemChatMessage2", "SystemMessage", "SystemMessageContent", "SystemMessageContentItem", - "SystemMessageContentItem_Text", + "TextAssistantMessageContentItem", + "TextAssistantMessageResponseContentItem", "TextContent", "TextResponseFormat2", + "TextSystemMessageContentItem", + "TextToolContent", + "Texts", + "TextsTruncate", "Tool2", "Tool2Function", "ToolCall2", "ToolCall2Function", + "ToolCallDeltaStreamedChatResponse2", + "ToolCallEndStreamedChatResponse2", + "ToolCallStartStreamedChatResponse2", + "ToolChatMessage2", + "ToolContent", "ToolMessage2", + "ToolMessage2ToolContent", + "ToolPlanDeltaStreamedChatResponse2", "ToolSource", "Usage", "UsageBilledUnits", "UsageTokens", + "UserChatMessage2", "UserMessage", "UserMessageContent", - "V2ChatRequestCitationMode", + "V2ChatRequestDocumentsItem", "V2ChatRequestSafetyMode", - "V2ChatStreamRequestCitationMode", + "V2ChatStreamRequestDocumentsItem", "V2ChatStreamRequestSafetyMode", + "V2EmbedRequest", ] diff --git a/src/cohere/v2/client.py b/src/cohere/v2/client.py index 34340141f..d18ab742b 100644 --- 
a/src/cohere/v2/client.py +++ b/src/cohere/v2/client.py @@ -4,7 +4,8 @@ from ..core.client_wrapper import SyncClientWrapper from .types.chat_messages import ChatMessages from .types.tool2 import Tool2 -from .types.v2chat_stream_request_citation_mode import V2ChatStreamRequestCitationMode +from .types.v2chat_stream_request_documents_item import V2ChatStreamRequestDocumentsItem +from .types.citation_options import CitationOptions from .types.response_format2 import ResponseFormat2 from .types.v2chat_stream_request_safety_mode import V2ChatStreamRequestSafetyMode from ..core.request_options import RequestOptions @@ -31,9 +32,11 @@ from ..types.gateway_timeout_error_body import GatewayTimeoutErrorBody from json.decoder import JSONDecodeError from ..core.api_error import ApiError -from .types.v2chat_request_citation_mode import V2ChatRequestCitationMode +from .types.v2chat_request_documents_item import V2ChatRequestDocumentsItem from .types.v2chat_request_safety_mode import V2ChatRequestSafetyMode from .types.non_streamed_chat_response2 import NonStreamedChatResponse2 +from .types.v2embed_request import V2EmbedRequest +from ..types.embed_by_type_response import EmbedByTypeResponse from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -50,7 +53,8 @@ def chat_stream( model: str, messages: ChatMessages, tools: typing.Optional[typing.Sequence[Tool2]] = OMIT, - citation_mode: typing.Optional[V2ChatStreamRequestCitationMode] = OMIT, + documents: typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]] = OMIT, + citation_options: typing.Optional[CitationOptions] = OMIT, response_format: typing.Optional[ResponseFormat2] = OMIT, safety_mode: typing.Optional[V2ChatStreamRequestSafetyMode] = OMIT, max_tokens: typing.Optional[int] = OMIT, @@ -80,11 +84,12 @@ def chat_stream( When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the 
response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - citation_mode : typing.Optional[V2ChatStreamRequestCitationMode] - Defaults to `"accurate"`. - Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + documents : typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]] + A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. + citation_options : typing.Optional[CitationOptions] + response_format : typing.Optional[ResponseFormat2] safety_mode : typing.Optional[V2ChatStreamRequestSafetyMode] @@ -156,10 +161,11 @@ def chat_stream( -------- from cohere import Client from cohere.v2 import ( - ChatMessage2_User, - ResponseFormat2_Text, + CitationOptions, + TextResponseFormat2, Tool2, Tool2Function, + UserChatMessage2, ) client = Client( @@ -169,9 +175,8 @@ def chat_stream( response = client.v2.chat_stream( model="string", messages=[ - ChatMessage2_User( + UserChatMessage2( content="string", - documents=[{"string": {"key": "value"}}], ) ], tools=[ @@ -183,8 +188,11 @@ def chat_stream( ), ) ], - citation_mode="FAST", - response_format=ResponseFormat2_Text(), + documents=["string"], + citation_options=CitationOptions( + mode="FAST", + ), + response_format=TextResponseFormat2(), safety_mode="CONTEXTUAL", max_tokens=1, stop_sequences=["string"], @@ -210,7 +218,12 @@ def chat_stream( "tools": convert_and_respect_annotation_metadata( object_=tools, annotation=typing.Sequence[Tool2], direction="write" ), - "citation_mode": citation_mode, + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2ChatStreamRequestDocumentsItem], direction="write" + ), + "citation_options": 
convert_and_respect_annotation_metadata( + object_=citation_options, annotation=CitationOptions, direction="write" + ), "response_format": convert_and_respect_annotation_metadata( object_=response_format, annotation=ResponseFormat2, direction="write" ), @@ -366,7 +379,8 @@ def chat( model: str, messages: ChatMessages, tools: typing.Optional[typing.Sequence[Tool2]] = OMIT, - citation_mode: typing.Optional[V2ChatRequestCitationMode] = OMIT, + documents: typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]] = OMIT, + citation_options: typing.Optional[CitationOptions] = OMIT, response_format: typing.Optional[ResponseFormat2] = OMIT, safety_mode: typing.Optional[V2ChatRequestSafetyMode] = OMIT, max_tokens: typing.Optional[int] = OMIT, @@ -396,11 +410,12 @@ def chat( When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - citation_mode : typing.Optional[V2ChatRequestCitationMode] - Defaults to `"accurate"`. - Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + documents : typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]] + A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. 
+ citation_options : typing.Optional[CitationOptions] + response_format : typing.Optional[ResponseFormat2] safety_mode : typing.Optional[V2ChatRequestSafetyMode] @@ -471,7 +486,7 @@ def chat( Examples -------- from cohere import Client - from cohere.v2 import ChatMessage2_Tool + from cohere.v2 import ToolChatMessage2 client = Client( client_name="YOUR_CLIENT_NAME", @@ -480,9 +495,9 @@ def chat( client.v2.chat( model="model", messages=[ - ChatMessage2_Tool( + ToolChatMessage2( tool_call_id="messages", - tool_content=["messages"], + tool_content="messages", ) ], ) @@ -498,7 +513,12 @@ def chat( "tools": convert_and_respect_annotation_metadata( object_=tools, annotation=typing.Sequence[Tool2], direction="write" ), - "citation_mode": citation_mode, + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2ChatRequestDocumentsItem], direction="write" + ), + "citation_options": convert_and_respect_annotation_metadata( + object_=citation_options, annotation=CitationOptions, direction="write" + ), "response_format": convert_and_respect_annotation_metadata( object_=response_format, annotation=ResponseFormat2, direction="write" ), @@ -641,6 +661,175 @@ def chat( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def embed( + self, *, request: V2EmbedRequest, request_options: typing.Optional[RequestOptions] = None + ) -> EmbedByTypeResponse: + """ + This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. + + Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. + + If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). 
+ + Parameters + ---------- + request : V2EmbedRequest + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EmbedByTypeResponse + OK + + Examples + -------- + from cohere import Client + from cohere.v2 import ImageV2EmbedRequest + + client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + client.v2.embed( + request=ImageV2EmbedRequest( + images=["string"], + model="string", + ), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v2/embed", + method="POST", + json=convert_and_respect_annotation_metadata(object_=request, annotation=V2EmbedRequest, direction="write"), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EmbedByTypeResponse, + construct_type( + type_=EmbedByTypeResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 401: + raise UnauthorizedError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + UnprocessableEntityErrorBody, + construct_type( + type_=UnprocessableEntityErrorBody, # type: ignore + object_=_response.json(), + 
), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + TooManyRequestsErrorBody, + construct_type( + type_=TooManyRequestsErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + typing.cast( + ClientClosedRequestErrorBody, + construct_type( + type_=ClientClosedRequestErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 500: + raise InternalServerError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 501: + raise NotImplementedError( + typing.cast( + NotImplementedErrorBody, + construct_type( + type_=NotImplementedErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + typing.cast( + GatewayTimeoutErrorBody, + construct_type( + type_=GatewayTimeoutErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + class AsyncV2Client: def __init__(self, *, client_wrapper: AsyncClientWrapper): @@ -652,7 +841,8 @@ async def chat_stream( model: str, messages: ChatMessages, tools: typing.Optional[typing.Sequence[Tool2]] = OMIT, - citation_mode: typing.Optional[V2ChatStreamRequestCitationMode] = OMIT, + documents: typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]] = OMIT, + citation_options: typing.Optional[CitationOptions] = OMIT, response_format: 
typing.Optional[ResponseFormat2] = OMIT, safety_mode: typing.Optional[V2ChatStreamRequestSafetyMode] = OMIT, max_tokens: typing.Optional[int] = OMIT, @@ -682,11 +872,12 @@ async def chat_stream( When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - citation_mode : typing.Optional[V2ChatStreamRequestCitationMode] - Defaults to `"accurate"`. - Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + documents : typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]] + A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. + citation_options : typing.Optional[CitationOptions] + response_format : typing.Optional[ResponseFormat2] safety_mode : typing.Optional[V2ChatStreamRequestSafetyMode] @@ -760,10 +951,11 @@ async def chat_stream( from cohere import AsyncClient from cohere.v2 import ( - ChatMessage2_User, - ResponseFormat2_Text, + CitationOptions, + TextResponseFormat2, Tool2, Tool2Function, + UserChatMessage2, ) client = AsyncClient( @@ -776,9 +968,8 @@ async def main() -> None: response = await client.v2.chat_stream( model="string", messages=[ - ChatMessage2_User( + UserChatMessage2( content="string", - documents=[{"string": {"key": "value"}}], ) ], tools=[ @@ -790,8 +981,11 @@ async def main() -> None: ), ) ], - citation_mode="FAST", - response_format=ResponseFormat2_Text(), + documents=["string"], + citation_options=CitationOptions( + mode="FAST", + ), + response_format=TextResponseFormat2(), safety_mode="CONTEXTUAL", max_tokens=1, stop_sequences=["string"], @@ -820,7 +1014,12 @@ async def main() -> 
None: "tools": convert_and_respect_annotation_metadata( object_=tools, annotation=typing.Sequence[Tool2], direction="write" ), - "citation_mode": citation_mode, + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2ChatStreamRequestDocumentsItem], direction="write" + ), + "citation_options": convert_and_respect_annotation_metadata( + object_=citation_options, annotation=CitationOptions, direction="write" + ), "response_format": convert_and_respect_annotation_metadata( object_=response_format, annotation=ResponseFormat2, direction="write" ), @@ -976,7 +1175,8 @@ async def chat( model: str, messages: ChatMessages, tools: typing.Optional[typing.Sequence[Tool2]] = OMIT, - citation_mode: typing.Optional[V2ChatRequestCitationMode] = OMIT, + documents: typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]] = OMIT, + citation_options: typing.Optional[CitationOptions] = OMIT, response_format: typing.Optional[ResponseFormat2] = OMIT, safety_mode: typing.Optional[V2ChatRequestSafetyMode] = OMIT, max_tokens: typing.Optional[int] = OMIT, @@ -1006,10 +1206,11 @@ async def chat( When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - citation_mode : typing.Optional[V2ChatRequestCitationMode] - Defaults to `"accurate"`. - Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + documents : typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]] + A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. 
+ + citation_options : typing.Optional[CitationOptions] response_format : typing.Optional[ResponseFormat2] @@ -1083,7 +1284,7 @@ async def chat( import asyncio from cohere import AsyncClient - from cohere.v2 import ChatMessage2_Tool + from cohere.v2 import ToolChatMessage2 client = AsyncClient( client_name="YOUR_CLIENT_NAME", @@ -1095,9 +1296,9 @@ async def main() -> None: await client.v2.chat( model="model", messages=[ - ChatMessage2_Tool( + ToolChatMessage2( tool_call_id="messages", - tool_content=["messages"], + tool_content="messages", ) ], ) @@ -1116,7 +1317,12 @@ async def main() -> None: "tools": convert_and_respect_annotation_metadata( object_=tools, annotation=typing.Sequence[Tool2], direction="write" ), - "citation_mode": citation_mode, + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2ChatRequestDocumentsItem], direction="write" + ), + "citation_options": convert_and_respect_annotation_metadata( + object_=citation_options, annotation=CitationOptions, direction="write" + ), "response_format": convert_and_respect_annotation_metadata( object_=response_format, annotation=ResponseFormat2, direction="write" ), @@ -1258,3 +1464,180 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + + async def embed( + self, *, request: V2EmbedRequest, request_options: typing.Optional[RequestOptions] = None + ) -> EmbedByTypeResponse: + """ + This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. + + Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. + + If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). 
+ + Parameters + ---------- + request : V2EmbedRequest + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EmbedByTypeResponse + OK + + Examples + -------- + import asyncio + + from cohere import AsyncClient + from cohere.v2 import ImageV2EmbedRequest + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.v2.embed( + request=ImageV2EmbedRequest( + images=["string"], + model="string", + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v2/embed", + method="POST", + json=convert_and_respect_annotation_metadata(object_=request, annotation=V2EmbedRequest, direction="write"), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EmbedByTypeResponse, + construct_type( + type_=EmbedByTypeResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 401: + raise UnauthorizedError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + UnprocessableEntityErrorBody, + 
construct_type( + type_=UnprocessableEntityErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + TooManyRequestsErrorBody, + construct_type( + type_=TooManyRequestsErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + typing.cast( + ClientClosedRequestErrorBody, + construct_type( + type_=ClientClosedRequestErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 500: + raise InternalServerError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 501: + raise NotImplementedError( + typing.cast( + NotImplementedErrorBody, + construct_type( + type_=NotImplementedErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + typing.cast( + GatewayTimeoutErrorBody, + construct_type( + type_=GatewayTimeoutErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/cohere/v2/types/__init__.py b/src/cohere/v2/types/__init__.py index 33038791e..f946c8f14 100644 --- a/src/cohere/v2/types/__init__.py +++ b/src/cohere/v2/types/__init__.py @@ -2,11 +2,11 @@ from .assistant_message import AssistantMessage from .assistant_message_content import AssistantMessageContent -from .assistant_message_content_item import 
AssistantMessageContentItem, AssistantMessageContentItem_Text +from .assistant_message_content_item import AssistantMessageContentItem, TextAssistantMessageContentItem from .assistant_message_response import AssistantMessageResponse from .assistant_message_response_content_item import ( AssistantMessageResponseContentItem, - AssistantMessageResponseContentItem_Text, + TextAssistantMessageResponseContentItem, ) from .chat_content_delta_event import ChatContentDeltaEvent from .chat_content_delta_event_delta import ChatContentDeltaEventDelta @@ -18,13 +18,7 @@ from .chat_content_start_event_delta_message import ChatContentStartEventDeltaMessage from .chat_content_start_event_delta_message_content import ChatContentStartEventDeltaMessageContent from .chat_finish_reason import ChatFinishReason -from .chat_message2 import ( - ChatMessage2, - ChatMessage2_Assistant, - ChatMessage2_System, - ChatMessage2_Tool, - ChatMessage2_User, -) +from .chat_message2 import AssistantChatMessage2, ChatMessage2, SystemChatMessage2, ToolChatMessage2, UserChatMessage2 from .chat_message_end_event import ChatMessageEndEvent from .chat_message_end_event_delta import ChatMessageEndEventDelta from .chat_message_start_event import ChatMessageStartEvent @@ -45,58 +39,74 @@ from .chat_tool_plan_delta_event_delta import ChatToolPlanDeltaEventDelta from .citation import Citation from .citation_end_event import CitationEndEvent +from .citation_options import CitationOptions +from .citation_options_mode import CitationOptionsMode from .citation_start_event import CitationStartEvent from .citation_start_event_delta import CitationStartEventDelta from .citation_start_event_delta_message import CitationStartEventDeltaMessage -from .content import Content, Content_Text +from .content import Content, TextContent +from .document import Document +from .document_content import DocumentContent from .document_source import DocumentSource +from .images import Images from .json_response_format2 import 
JsonResponseFormat2 from .non_streamed_chat_response2 import NonStreamedChatResponse2 -from .response_format2 import ResponseFormat2, ResponseFormat2_JsonObject, ResponseFormat2_Text -from .source import Source, Source_Document, Source_Tool +from .response_format2 import JsonObjectResponseFormat2, ResponseFormat2, TextResponseFormat2 +from .source import DocumentSource, Source, ToolSource from .streamed_chat_response2 import ( + CitationEndStreamedChatResponse2, + CitationStartStreamedChatResponse2, + ContentDeltaStreamedChatResponse2, + ContentEndStreamedChatResponse2, + ContentStartStreamedChatResponse2, + MessageEndStreamedChatResponse2, + MessageStartStreamedChatResponse2, StreamedChatResponse2, - StreamedChatResponse2_CitationEnd, - StreamedChatResponse2_CitationStart, - StreamedChatResponse2_ContentDelta, - StreamedChatResponse2_ContentEnd, - StreamedChatResponse2_ContentStart, - StreamedChatResponse2_MessageEnd, - StreamedChatResponse2_MessageStart, - StreamedChatResponse2_ToolCallDelta, - StreamedChatResponse2_ToolCallEnd, - StreamedChatResponse2_ToolCallStart, - StreamedChatResponse2_ToolPlanDelta, + ToolCallDeltaStreamedChatResponse2, + ToolCallEndStreamedChatResponse2, + ToolCallStartStreamedChatResponse2, + ToolPlanDeltaStreamedChatResponse2, ) from .system_message import SystemMessage from .system_message_content import SystemMessageContent -from .system_message_content_item import SystemMessageContentItem, SystemMessageContentItem_Text +from .system_message_content_item import SystemMessageContentItem, TextSystemMessageContentItem from .text_content import TextContent from .text_response_format2 import TextResponseFormat2 +from .texts import Texts +from .texts_truncate import TextsTruncate from .tool2 import Tool2 from .tool2function import Tool2Function from .tool_call2 import ToolCall2 from .tool_call2function import ToolCall2Function +from .tool_content import DocumentToolContent, TextToolContent, ToolContent from .tool_message2 import ToolMessage2 
+from .tool_message2tool_content import ToolMessage2ToolContent from .tool_source import ToolSource from .usage import Usage from .usage_billed_units import UsageBilledUnits from .usage_tokens import UsageTokens from .user_message import UserMessage from .user_message_content import UserMessageContent -from .v2chat_request_citation_mode import V2ChatRequestCitationMode +from .v2chat_request_documents_item import V2ChatRequestDocumentsItem from .v2chat_request_safety_mode import V2ChatRequestSafetyMode -from .v2chat_stream_request_citation_mode import V2ChatStreamRequestCitationMode +from .v2chat_stream_request_documents_item import V2ChatStreamRequestDocumentsItem from .v2chat_stream_request_safety_mode import V2ChatStreamRequestSafetyMode +from .v2embed_request import ( + ClassificationV2EmbedRequest, + ClusteringV2EmbedRequest, + ImageV2EmbedRequest, + SearchDocumentV2EmbedRequest, + SearchQueryV2EmbedRequest, + V2EmbedRequest, +) __all__ = [ + "AssistantChatMessage2", "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentItem", - "AssistantMessageContentItem_Text", "AssistantMessageResponse", "AssistantMessageResponseContentItem", - "AssistantMessageResponseContentItem_Text", "ChatContentDeltaEvent", "ChatContentDeltaEventDelta", "ChatContentDeltaEventDeltaMessage", @@ -108,10 +118,6 @@ "ChatContentStartEventDeltaMessageContent", "ChatFinishReason", "ChatMessage2", - "ChatMessage2_Assistant", - "ChatMessage2_System", - "ChatMessage2_Tool", - "ChatMessage2_User", "ChatMessageEndEvent", "ChatMessageEndEventDelta", "ChatMessageStartEvent", @@ -132,51 +138,69 @@ "ChatToolPlanDeltaEventDelta", "Citation", "CitationEndEvent", + "CitationEndStreamedChatResponse2", + "CitationOptions", + "CitationOptionsMode", "CitationStartEvent", "CitationStartEventDelta", "CitationStartEventDeltaMessage", + "CitationStartStreamedChatResponse2", + "ClassificationV2EmbedRequest", + "ClusteringV2EmbedRequest", "Content", - "Content_Text", + 
"ContentDeltaStreamedChatResponse2", + "ContentEndStreamedChatResponse2", + "ContentStartStreamedChatResponse2", + "Document", + "DocumentContent", "DocumentSource", + "DocumentToolContent", + "ImageV2EmbedRequest", + "Images", + "JsonObjectResponseFormat2", "JsonResponseFormat2", + "MessageEndStreamedChatResponse2", + "MessageStartStreamedChatResponse2", "NonStreamedChatResponse2", "ResponseFormat2", - "ResponseFormat2_JsonObject", - "ResponseFormat2_Text", + "SearchDocumentV2EmbedRequest", + "SearchQueryV2EmbedRequest", "Source", - "Source_Document", - "Source_Tool", "StreamedChatResponse2", - "StreamedChatResponse2_CitationEnd", - "StreamedChatResponse2_CitationStart", - "StreamedChatResponse2_ContentDelta", - "StreamedChatResponse2_ContentEnd", - "StreamedChatResponse2_ContentStart", - "StreamedChatResponse2_MessageEnd", - "StreamedChatResponse2_MessageStart", - "StreamedChatResponse2_ToolCallDelta", - "StreamedChatResponse2_ToolCallEnd", - "StreamedChatResponse2_ToolCallStart", - "StreamedChatResponse2_ToolPlanDelta", + "SystemChatMessage2", "SystemMessage", "SystemMessageContent", "SystemMessageContentItem", - "SystemMessageContentItem_Text", + "TextAssistantMessageContentItem", + "TextAssistantMessageResponseContentItem", "TextContent", "TextResponseFormat2", + "TextSystemMessageContentItem", + "TextToolContent", + "Texts", + "TextsTruncate", "Tool2", "Tool2Function", "ToolCall2", "ToolCall2Function", + "ToolCallDeltaStreamedChatResponse2", + "ToolCallEndStreamedChatResponse2", + "ToolCallStartStreamedChatResponse2", + "ToolChatMessage2", + "ToolContent", "ToolMessage2", + "ToolMessage2ToolContent", + "ToolPlanDeltaStreamedChatResponse2", "ToolSource", "Usage", "UsageBilledUnits", "UsageTokens", + "UserChatMessage2", "UserMessage", "UserMessageContent", - "V2ChatRequestCitationMode", + "V2ChatRequestDocumentsItem", "V2ChatRequestSafetyMode", - "V2ChatStreamRequestCitationMode", + "V2ChatStreamRequestDocumentsItem", "V2ChatStreamRequestSafetyMode", + 
"V2EmbedRequest", ] diff --git a/src/cohere/v2/types/assistant_message_content_item.py b/src/cohere/v2/types/assistant_message_content_item.py index 7f2936c96..9ddfc7575 100644 --- a/src/cohere/v2/types/assistant_message_content_item.py +++ b/src/cohere/v2/types/assistant_message_content_item.py @@ -9,7 +9,7 @@ from ...core.unchecked_base_model import UnionMetadata -class AssistantMessageContentItem_Text(UncheckedBaseModel): +class TextAssistantMessageContentItem(UncheckedBaseModel): type: typing.Literal["text"] = "text" text: str @@ -24,5 +24,5 @@ class Config: AssistantMessageContentItem = typing_extensions.Annotated[ - AssistantMessageContentItem_Text, UnionMetadata(discriminant="type") + TextAssistantMessageContentItem, UnionMetadata(discriminant="type") ] diff --git a/src/cohere/v2/types/assistant_message_response_content_item.py b/src/cohere/v2/types/assistant_message_response_content_item.py index 2c49973f5..e00fd158c 100644 --- a/src/cohere/v2/types/assistant_message_response_content_item.py +++ b/src/cohere/v2/types/assistant_message_response_content_item.py @@ -9,7 +9,7 @@ from ...core.unchecked_base_model import UnionMetadata -class AssistantMessageResponseContentItem_Text(UncheckedBaseModel): +class TextAssistantMessageResponseContentItem(UncheckedBaseModel): type: typing.Literal["text"] = "text" text: str @@ -24,5 +24,5 @@ class Config: AssistantMessageResponseContentItem = typing_extensions.Annotated[ - AssistantMessageResponseContentItem_Text, UnionMetadata(discriminant="type") + TextAssistantMessageResponseContentItem, UnionMetadata(discriminant="type") ] diff --git a/src/cohere/v2/types/chat_message2.py b/src/cohere/v2/types/chat_message2.py index 0abc57ff4..a13e63245 100644 --- a/src/cohere/v2/types/chat_message2.py +++ b/src/cohere/v2/types/chat_message2.py @@ -4,25 +4,24 @@ from ...core.unchecked_base_model import UncheckedBaseModel import typing from .user_message_content import UserMessageContent -from ...types.chat_document import 
ChatDocument from ...core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic from .tool_call2 import ToolCall2 from .assistant_message_content import AssistantMessageContent from .citation import Citation from .system_message_content import SystemMessageContent +from .tool_message2tool_content import ToolMessage2ToolContent import typing_extensions from ...core.unchecked_base_model import UnionMetadata -class ChatMessage2_User(UncheckedBaseModel): +class UserChatMessage2(UncheckedBaseModel): """ Represents a single message in the chat history from a given role. """ role: typing.Literal["user"] = "user" content: UserMessageContent - documents: typing.Optional[typing.List[ChatDocument]] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 @@ -34,7 +33,7 @@ class Config: extra = pydantic.Extra.allow -class ChatMessage2_Assistant(UncheckedBaseModel): +class AssistantChatMessage2(UncheckedBaseModel): """ Represents a single message in the chat history from a given role. """ @@ -55,7 +54,7 @@ class Config: extra = pydantic.Extra.allow -class ChatMessage2_System(UncheckedBaseModel): +class SystemChatMessage2(UncheckedBaseModel): """ Represents a single message in the chat history from a given role. """ @@ -73,14 +72,14 @@ class Config: extra = pydantic.Extra.allow -class ChatMessage2_Tool(UncheckedBaseModel): +class ToolChatMessage2(UncheckedBaseModel): """ Represents a single message in the chat history from a given role. 
""" role: typing.Literal["tool"] = "tool" tool_call_id: str - tool_content: typing.List[str] + tool_content: ToolMessage2ToolContent if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 @@ -93,6 +92,6 @@ class Config: ChatMessage2 = typing_extensions.Annotated[ - typing.Union[ChatMessage2_User, ChatMessage2_Assistant, ChatMessage2_System, ChatMessage2_Tool], + typing.Union[UserChatMessage2, AssistantChatMessage2, SystemChatMessage2, ToolChatMessage2], UnionMetadata(discriminant="role"), ] diff --git a/src/cohere/v2/types/citation_options.py b/src/cohere/v2/types/citation_options.py new file mode 100644 index 000000000..a3ea55a4a --- /dev/null +++ b/src/cohere/v2/types/citation_options.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +from .citation_options_mode import CitationOptionsMode +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class CitationOptions(UncheckedBaseModel): + """ + Options for controlling citation generation. + """ + + mode: typing.Optional[CitationOptionsMode] = pydantic.Field(default=None) + """ + Defaults to `"accurate"`. + Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/citation_options_mode.py b/src/cohere/v2/types/citation_options_mode.py new file mode 100644 index 000000000..ddfdf67f2 --- /dev/null +++ b/src/cohere/v2/types/citation_options_mode.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CitationOptionsMode = typing.Union[typing.Literal["FAST", "ACCURATE", "OFF"], typing.Any] diff --git a/src/cohere/v2/types/content.py b/src/cohere/v2/types/content.py index 597aa8348..2c7290d73 100644 --- a/src/cohere/v2/types/content.py +++ b/src/cohere/v2/types/content.py @@ -9,7 +9,7 @@ from ...core.unchecked_base_model import UnionMetadata -class Content_Text(UncheckedBaseModel): +class TextContent(UncheckedBaseModel): """ A Content block which contains information about the content type and the content itself. """ @@ -27,4 +27,4 @@ class Config: extra = pydantic.Extra.allow -Content = typing_extensions.Annotated[Content_Text, UnionMetadata(discriminant="type")] +Content = typing_extensions.Annotated[TextContent, UnionMetadata(discriminant="type")] diff --git a/src/cohere/v2/types/document.py b/src/cohere/v2/types/document.py new file mode 100644 index 000000000..b96de850f --- /dev/null +++ b/src/cohere/v2/types/document.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class Document(UncheckedBaseModel): + """ + Relevant information that could be used by the model to generate a more accurate reply. + The content of each document are generally short (should be under 300 words). 
Metadata should be used to provide additional information, both the key name and the value will be + passed to the model. + """ + + data: typing.Dict[str, str] = pydantic.Field() + """ + A relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary. + """ + + id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for this document which will be referenced in citations. If not provided an ID will be automatically generated + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/document_content.py b/src/cohere/v2/types/document_content.py new file mode 100644 index 000000000..999dcb4a3 --- /dev/null +++ b/src/cohere/v2/types/document_content.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.unchecked_base_model import UncheckedBaseModel +from .document import Document +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class DocumentContent(UncheckedBaseModel): + """ + Document content. + """ + + document: Document + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/images.py b/src/cohere/v2/types/images.py new file mode 100644 index 000000000..c3d7ce121 --- /dev/null +++ b/src/cohere/v2/types/images.py @@ -0,0 +1,50 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from ...types.embedding_type import EmbeddingType +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class Images(UncheckedBaseModel): + images: typing.List[str] = pydantic.Field() + """ + An array of image data URIs for the model to embed. Maximum number of images per call is `1`. + The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and has a maximum size of 5MB. + """ + + model: str = pydantic.Field() + """ + Defaults to embed-english-v2.0 + The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. + Available models and corresponding embedding dimensions: + + - `embed-english-v3.0` 1024 + - `embed-multilingual-v3.0` 1024 + - `embed-english-light-v3.0` 384 + - `embed-multilingual-light-v3.0` 384 + - `embed-english-v2.0` 4096 + - `embed-english-light-v2.0` 1024 + - `embed-multilingual-v2.0` 768 + """ + + embedding_types: typing.Optional[typing.List[EmbeddingType]] = pydantic.Field(default=None) + """ + Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. + + - `"float"`: Use this when you want to get back the default float embeddings. Valid for all models. + - `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models. + - `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models. + - `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models. + - `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/response_format2.py b/src/cohere/v2/types/response_format2.py index 6d3316b3c..9fbec3997 100644 --- a/src/cohere/v2/types/response_format2.py +++ b/src/cohere/v2/types/response_format2.py @@ -9,7 +9,7 @@ from ...core.unchecked_base_model import UnionMetadata -class ResponseFormat2_Text(UncheckedBaseModel): +class TextResponseFormat2(UncheckedBaseModel): """ Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/docs/command-r), [Command R+](https://docs.cohere.com/docs/command-r-plus) and newer models. @@ -33,7 +33,7 @@ class Config: extra = pydantic.Extra.allow -class ResponseFormat2_JsonObject(UncheckedBaseModel): +class JsonObjectResponseFormat2(UncheckedBaseModel): """ Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/docs/command-r), [Command R+](https://docs.cohere.com/docs/command-r-plus) and newer models. @@ -59,5 +59,5 @@ class Config: ResponseFormat2 = typing_extensions.Annotated[ - typing.Union[ResponseFormat2_Text, ResponseFormat2_JsonObject], UnionMetadata(discriminant="type") + typing.Union[TextResponseFormat2, JsonObjectResponseFormat2], UnionMetadata(discriminant="type") ] diff --git a/src/cohere/v2/types/source.py b/src/cohere/v2/types/source.py index a96fc9e6c..b91bf7277 100644 --- a/src/cohere/v2/types/source.py +++ b/src/cohere/v2/types/source.py @@ -9,7 +9,7 @@ from ...core.unchecked_base_model import UnionMetadata -class Source_Tool(UncheckedBaseModel): +class ToolSource(UncheckedBaseModel): """ A source object containing information about the source of the data cited. 
""" @@ -28,7 +28,7 @@ class Config: extra = pydantic.Extra.allow -class Source_Document(UncheckedBaseModel): +class DocumentSource(UncheckedBaseModel): """ A source object containing information about the source of the data cited. """ @@ -47,4 +47,4 @@ class Config: extra = pydantic.Extra.allow -Source = typing_extensions.Annotated[typing.Union[Source_Tool, Source_Document], UnionMetadata(discriminant="type")] +Source = typing_extensions.Annotated[typing.Union[ToolSource, DocumentSource], UnionMetadata(discriminant="type")] diff --git a/src/cohere/v2/types/streamed_chat_response2.py b/src/cohere/v2/types/streamed_chat_response2.py index ab8c49249..1154d60c5 100644 --- a/src/cohere/v2/types/streamed_chat_response2.py +++ b/src/cohere/v2/types/streamed_chat_response2.py @@ -17,7 +17,7 @@ from ...core.unchecked_base_model import UnionMetadata -class StreamedChatResponse2_MessageStart(UncheckedBaseModel): +class MessageStartStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -36,7 +36,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ContentStart(UncheckedBaseModel): +class ContentStartStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -55,7 +55,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ContentDelta(UncheckedBaseModel): +class ContentDeltaStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -74,7 +74,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ContentEnd(UncheckedBaseModel): +class ContentEndStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). 
""" @@ -92,7 +92,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ToolPlanDelta(UncheckedBaseModel): +class ToolPlanDeltaStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -110,7 +110,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ToolCallStart(UncheckedBaseModel): +class ToolCallStartStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -129,7 +129,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ToolCallDelta(UncheckedBaseModel): +class ToolCallDeltaStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -148,7 +148,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ToolCallEnd(UncheckedBaseModel): +class ToolCallEndStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -166,7 +166,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_CitationStart(UncheckedBaseModel): +class CitationStartStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -185,7 +185,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_CitationEnd(UncheckedBaseModel): +class CitationEndStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). 
""" @@ -203,7 +203,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_MessageEnd(UncheckedBaseModel): +class MessageEndStreamedChatResponse2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -224,17 +224,17 @@ class Config: StreamedChatResponse2 = typing_extensions.Annotated[ typing.Union[ - StreamedChatResponse2_MessageStart, - StreamedChatResponse2_ContentStart, - StreamedChatResponse2_ContentDelta, - StreamedChatResponse2_ContentEnd, - StreamedChatResponse2_ToolPlanDelta, - StreamedChatResponse2_ToolCallStart, - StreamedChatResponse2_ToolCallDelta, - StreamedChatResponse2_ToolCallEnd, - StreamedChatResponse2_CitationStart, - StreamedChatResponse2_CitationEnd, - StreamedChatResponse2_MessageEnd, + MessageStartStreamedChatResponse2, + ContentStartStreamedChatResponse2, + ContentDeltaStreamedChatResponse2, + ContentEndStreamedChatResponse2, + ToolPlanDeltaStreamedChatResponse2, + ToolCallStartStreamedChatResponse2, + ToolCallDeltaStreamedChatResponse2, + ToolCallEndStreamedChatResponse2, + CitationStartStreamedChatResponse2, + CitationEndStreamedChatResponse2, + MessageEndStreamedChatResponse2, ], UnionMetadata(discriminant="type"), ] diff --git a/src/cohere/v2/types/system_message_content_item.py b/src/cohere/v2/types/system_message_content_item.py index ed95a8a90..79d261df3 100644 --- a/src/cohere/v2/types/system_message_content_item.py +++ b/src/cohere/v2/types/system_message_content_item.py @@ -9,7 +9,7 @@ from ...core.unchecked_base_model import UnionMetadata -class SystemMessageContentItem_Text(UncheckedBaseModel): +class TextSystemMessageContentItem(UncheckedBaseModel): type: typing.Literal["text"] = "text" text: str @@ -23,6 +23,4 @@ class Config: extra = pydantic.Extra.allow -SystemMessageContentItem = typing_extensions.Annotated[ - SystemMessageContentItem_Text, UnionMetadata(discriminant="type") -] +SystemMessageContentItem = 
typing_extensions.Annotated[TextSystemMessageContentItem, UnionMetadata(discriminant="type")] diff --git a/src/cohere/v2/types/texts.py b/src/cohere/v2/types/texts.py new file mode 100644 index 000000000..b24280910 --- /dev/null +++ b/src/cohere/v2/types/texts.py @@ -0,0 +1,62 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from ...types.embedding_type import EmbeddingType +from .texts_truncate import TextsTruncate +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class Texts(UncheckedBaseModel): + texts: typing.List[str] = pydantic.Field() + """ + An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality. + """ + + model: str = pydantic.Field() + """ + Defaults to embed-english-v2.0 + + The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. + + Available models and corresponding embedding dimensions: + + - `embed-english-v3.0` 1024 + - `embed-multilingual-v3.0` 1024 + - `embed-english-light-v3.0` 384 + - `embed-multilingual-light-v3.0` 384 + + - `embed-english-v2.0` 4096 + - `embed-english-light-v2.0` 1024 + - `embed-multilingual-v2.0` 768 + """ + + embedding_types: typing.Optional[typing.List[EmbeddingType]] = pydantic.Field(default=None) + """ + Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. + + - `"float"`: Use this when you want to get back the default float embeddings. Valid for all models. + - `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models. 
+ - `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models. + - `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models. + - `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models. + """ + + truncate: typing.Optional[TextsTruncate] = pydantic.Field(default=None) + """ + One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. + + Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + + If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/texts_truncate.py b/src/cohere/v2/types/texts_truncate.py new file mode 100644 index 000000000..b0e2faf0e --- /dev/null +++ b/src/cohere/v2/types/texts_truncate.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TextsTruncate = typing.Union[typing.Literal["NONE", "START", "END"], typing.Any] diff --git a/src/cohere/v2/types/tool_content.py b/src/cohere/v2/types/tool_content.py new file mode 100644 index 000000000..08d27ff71 --- /dev/null +++ b/src/cohere/v2/types/tool_content.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from .document import Document +import typing_extensions +from ...core.unchecked_base_model import UnionMetadata + + +class TextToolContent(UncheckedBaseModel): + """ + A content block which contains information about the content of a tool result + """ + + type: typing.Literal["text"] = "text" + text: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class DocumentToolContent(UncheckedBaseModel): + """ + A content block which contains information about the content of a tool result + """ + + type: typing.Literal["document"] = "document" + document: Document + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +ToolContent = typing_extensions.Annotated[ + typing.Union[TextToolContent, DocumentToolContent], UnionMetadata(discriminant="type") +] diff --git a/src/cohere/v2/types/tool_message2.py b/src/cohere/v2/types/tool_message2.py index 515226d72..9b12c352d 100644 --- a/src/cohere/v2/types/tool_message2.py +++ b/src/cohere/v2/types/tool_message2.py @@ -2,8 +2,9 @@ from ...core.unchecked_base_model import UncheckedBaseModel import pydantic -import typing +from .tool_message2tool_content import ToolMessage2ToolContent from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import typing class ToolMessage2(UncheckedBaseModel): @@ -16,9 +17,9 @@ class ToolMessage2(UncheckedBaseModel): The id of the associated tool call that has provided the given content """ - tool_content: 
typing.List[str] = pydantic.Field() + tool_content: ToolMessage2ToolContent = pydantic.Field() """ - A list of outputs from a tool. The content should formatted as a JSON object string + A single or list of outputs from a tool. The content should be formatted as a JSON object string, or a list of tool content blocks """ if IS_PYDANTIC_V2: diff --git a/src/cohere/v2/types/tool_message2tool_content.py b/src/cohere/v2/types/tool_message2tool_content.py new file mode 100644 index 000000000..eafecdcd8 --- /dev/null +++ b/src/cohere/v2/types/tool_message2tool_content.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_content import ToolContent + +ToolMessage2ToolContent = typing.Union[str, typing.List[ToolContent]] diff --git a/src/cohere/v2/types/user_message.py b/src/cohere/v2/types/user_message.py index f73c47177..dd8fc6b16 100644 --- a/src/cohere/v2/types/user_message.py +++ b/src/cohere/v2/types/user_message.py @@ -3,9 +3,8 @@ from ...core.unchecked_base_model import UncheckedBaseModel from .user_message_content import UserMessageContent import pydantic -import typing -from ...types.chat_document import ChatDocument from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import typing class UserMessage(UncheckedBaseModel): @@ -19,11 +18,6 @@ class UserMessage(UncheckedBaseModel): If a string is provided, it will be treated as a text content block. - documents: typing.Optional[typing.List[ChatDocument]] = pydantic.Field(default=None) - """ - Documents seen by the model when generating the reply. 
- """ - if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/cohere/v2/types/v2chat_request_citation_mode.py b/src/cohere/v2/types/v2chat_request_citation_mode.py deleted file mode 100644 index 9b5dc7d5a..000000000 --- a/src/cohere/v2/types/v2chat_request_citation_mode.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -V2ChatRequestCitationMode = typing.Union[typing.Literal["FAST", "ACCURATE", "OFF"], typing.Any] diff --git a/src/cohere/v2/types/v2chat_request_documents_item.py b/src/cohere/v2/types/v2chat_request_documents_item.py new file mode 100644 index 000000000..3850eb0f7 --- /dev/null +++ b/src/cohere/v2/types/v2chat_request_documents_item.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .document import Document + +V2ChatRequestDocumentsItem = typing.Union[str, Document] diff --git a/src/cohere/v2/types/v2chat_stream_request_citation_mode.py b/src/cohere/v2/types/v2chat_stream_request_citation_mode.py deleted file mode 100644 index 2e07c9ebe..000000000 --- a/src/cohere/v2/types/v2chat_stream_request_citation_mode.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -V2ChatStreamRequestCitationMode = typing.Union[typing.Literal["FAST", "ACCURATE", "OFF"], typing.Any] diff --git a/src/cohere/v2/types/v2chat_stream_request_documents_item.py b/src/cohere/v2/types/v2chat_stream_request_documents_item.py new file mode 100644 index 000000000..f50059e48 --- /dev/null +++ b/src/cohere/v2/types/v2chat_stream_request_documents_item.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from .document import Document + +V2ChatStreamRequestDocumentsItem = typing.Union[str, Document] diff --git a/src/cohere/v2/types/v2embed_request.py b/src/cohere/v2/types/v2embed_request.py new file mode 100644 index 000000000..860404e25 --- /dev/null +++ b/src/cohere/v2/types/v2embed_request.py @@ -0,0 +1,107 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +from ...types.embedding_type import EmbeddingType +from .texts_truncate import TextsTruncate +from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +import typing_extensions +from ...core.unchecked_base_model import UnionMetadata + + +class SearchDocumentV2EmbedRequest(UncheckedBaseModel): + input_type: typing.Literal["search_document"] = "search_document" + texts: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + truncate: typing.Optional[TextsTruncate] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class SearchQueryV2EmbedRequest(UncheckedBaseModel): + input_type: typing.Literal["search_query"] = "search_query" + texts: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + truncate: typing.Optional[TextsTruncate] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ClassificationV2EmbedRequest(UncheckedBaseModel): + input_type: typing.Literal["classification"] = "classification" + texts: typing.List[str] + model: str + 
embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + truncate: typing.Optional[TextsTruncate] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ClusteringV2EmbedRequest(UncheckedBaseModel): + input_type: typing.Literal["clustering"] = "clustering" + texts: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + truncate: typing.Optional[TextsTruncate] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ImageV2EmbedRequest(UncheckedBaseModel): + input_type: typing.Literal["image"] = "image" + images: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +V2EmbedRequest = typing_extensions.Annotated[ + typing.Union[ + SearchDocumentV2EmbedRequest, + SearchQueryV2EmbedRequest, + ClassificationV2EmbedRequest, + ClusteringV2EmbedRequest, + ImageV2EmbedRequest, + ], + UnionMetadata(discriminant="input_type"), +]