From cdd01e496a83f33a4dbfefa4d94612d978704604 Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 09:04:26 -0500 Subject: [PATCH] :herb: Fern Regeneration -- September 19, 2024 (#583) * SDK regeneration * Fixes * Fixes --------- Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Co-authored-by: Billy Trend --- poetry.lock | 245 +-- pyproject.toml | 2 +- reference.md | 289 ++- src/cohere/__init__.py | 348 ++-- src/cohere/base_client.py | 32 +- src/cohere/core/client_wrapper.py | 2 +- src/cohere/types/__init__.py | 257 ++- .../{v2 => }/types/assistant_message.py | 8 +- .../types/assistant_message_content.py | 0 .../types/assistant_message_content_item.py | 10 +- .../types/assistant_message_response.py | 8 +- ...assistant_message_response_content_item.py | 10 +- .../types/chat_content_delta_event.py | 2 +- .../types/chat_content_delta_event_delta.py | 4 +- .../chat_content_delta_event_delta_message.py | 4 +- ...ntent_delta_event_delta_message_content.py | 4 +- .../{v2 => }/types/chat_content_end_event.py | 2 +- .../types/chat_content_start_event.py | 2 +- .../types/chat_content_start_event_delta.py | 4 +- .../chat_content_start_event_delta_message.py | 4 +- ...ntent_start_event_delta_message_content.py | 4 +- .../{v2 => }/types/chat_finish_reason.py | 0 .../{v2 => }/types/chat_message_end_event.py | 2 +- .../types/chat_message_end_event_delta.py | 4 +- .../types/chat_message_start_event.py | 2 +- .../types/chat_message_start_event_delta.py | 4 +- .../chat_message_start_event_delta_message.py | 4 +- .../chat_message_v2.py} | 27 +- src/cohere/types/chat_messages.py | 6 + .../chat_response.py} | 6 +- .../{v2 => }/types/chat_stream_event_type.py | 4 +- .../types/chat_tool_call_delta_event.py | 2 +- .../types/chat_tool_call_delta_event_delta.py | 4 +- ...t_tool_call_delta_event_delta_tool_call.py | 4 +- ...ll_delta_event_delta_tool_call_function.py | 4 +- 
.../types/chat_tool_call_end_event.py | 2 +- .../types/chat_tool_call_start_event.py | 2 +- .../types/chat_tool_call_start_event_delta.py | 4 +- ...t_tool_call_start_event_delta_tool_call.py | 4 +- ...ll_start_event_delta_tool_call_function.py | 4 +- .../types/chat_tool_plan_delta_event.py | 2 +- .../types/chat_tool_plan_delta_event_delta.py | 4 +- src/cohere/{v2 => }/types/citation.py | 4 +- .../{v2 => }/types/citation_end_event.py | 2 +- src/cohere/types/citation_options.py | 28 + src/cohere/types/citation_options_mode.py | 5 + .../{v2 => }/types/citation_start_event.py | 2 +- .../types/citation_start_event_delta.py | 4 +- .../citation_start_event_delta_message.py | 4 +- src/cohere/{v2 => }/types/content.py | 10 +- src/cohere/types/document.py | 33 + src/cohere/types/document_content.py | 24 + src/cohere/{v2 => }/types/document_source.py | 4 +- src/cohere/types/embed_request_v2.py | 107 ++ src/cohere/types/embed_response.py | 6 +- .../types/generate_streamed_response.py | 10 +- src/cohere/types/images.py | 50 + .../json_response_format_v2.py} | 6 +- src/cohere/types/message.py | 10 +- src/cohere/types/response_format.py | 6 +- .../response_format_v2.py} | 14 +- src/cohere/{v2 => }/types/source.py | 12 +- src/cohere/types/streamed_chat_response.py | 32 +- .../streamed_chat_response_v2.py} | 52 +- src/cohere/{v2 => }/types/system_message.py | 4 +- .../{v2 => }/types/system_message_content.py | 0 .../types/system_message_content_item.py | 12 +- src/cohere/{v2 => }/types/text_content.py | 4 +- .../text_response_format_v2.py} | 6 +- src/cohere/types/texts.py | 62 + src/cohere/types/texts_truncate.py | 5 + .../tool_call2.py => types/tool_call_v2.py} | 10 +- .../tool_call_v2function.py} | 6 +- src/cohere/types/tool_content.py | 51 + src/cohere/types/tool_message_v2.py | 32 + .../types/tool_message_v2tool_content.py | 6 + src/cohere/{v2 => }/types/tool_source.py | 4 +- src/cohere/types/tool_v2.py | 24 + .../tool_v2function.py} | 6 +- src/cohere/{v2 => }/types/usage.py | 
4 +- .../{v2 => }/types/usage_billed_units.py | 4 +- src/cohere/{v2 => }/types/usage_tokens.py | 4 +- src/cohere/{v2 => }/types/user_message.py | 10 +- .../{v2 => }/types/user_message_content.py | 0 src/cohere/utils.py | 10 +- src/cohere/v2/__init__.py | 186 +- src/cohere/v2/client.py | 1649 ++++++++++++----- src/cohere/v2/types/__init__.py | 185 +- src/cohere/v2/types/chat_messages.py | 6 - .../v2/types/v2chat_request_citation_mode.py | 5 - .../v2/types/v2chat_request_documents_item.py | 6 + .../v2chat_stream_request_citation_mode.py | 5 - .../v2chat_stream_request_documents_item.py | 6 + .../types/v2rerank_request_documents_item.py | 6 + ...{tool_message2.py => v2rerank_response.py} | 20 +- .../types/v2rerank_response_results_item.py | 33 + ...2rerank_response_results_item_document.py} | 14 +- tests/test_async_client.py | 10 +- tests/test_client.py | 12 +- tests/test_client_v2.py | 20 +- tests/test_embed_utils.py | 22 +- 101 files changed, 2791 insertions(+), 1419 deletions(-) rename src/cohere/{v2 => }/types/assistant_message.py (78%) rename src/cohere/{v2 => }/types/assistant_message_content.py (100%) rename src/cohere/{v2 => }/types/assistant_message_content_item.py (65%) rename src/cohere/{v2 => }/types/assistant_message_response.py (80%) rename src/cohere/{v2 => }/types/assistant_message_response_content_item.py (64%) rename src/cohere/{v2 => }/types/chat_content_delta_event.py (93%) rename src/cohere/{v2 => }/types/chat_content_delta_event_delta.py (84%) rename src/cohere/{v2 => }/types/chat_content_delta_event_delta_message.py (84%) rename src/cohere/{v2 => }/types/chat_content_delta_event_delta_message_content.py (81%) rename src/cohere/{v2 => }/types/chat_content_end_event.py (92%) rename src/cohere/{v2 => }/types/chat_content_start_event.py (93%) rename src/cohere/{v2 => }/types/chat_content_start_event_delta.py (84%) rename src/cohere/{v2 => }/types/chat_content_start_event_delta_message.py (84%) rename src/cohere/{v2 => 
}/types/chat_content_start_event_delta_message_content.py (82%) rename src/cohere/{v2 => }/types/chat_finish_reason.py (100%) rename src/cohere/{v2 => }/types/chat_message_end_event.py (93%) rename src/cohere/{v2 => }/types/chat_message_end_event_delta.py (84%) rename src/cohere/{v2 => }/types/chat_message_start_event.py (93%) rename src/cohere/{v2 => }/types/chat_message_start_event_delta.py (84%) rename src/cohere/{v2 => }/types/chat_message_start_event_delta_message.py (83%) rename src/cohere/{v2/types/chat_message2.py => types/chat_message_v2.py} (76%) create mode 100644 src/cohere/types/chat_messages.py rename src/cohere/{v2/types/non_streamed_chat_response2.py => types/chat_response.py} (85%) rename src/cohere/{v2 => }/types/chat_stream_event_type.py (80%) rename src/cohere/{v2 => }/types/chat_tool_call_delta_event.py (93%) rename src/cohere/{v2 => }/types/chat_tool_call_delta_event_delta.py (84%) rename src/cohere/{v2 => }/types/chat_tool_call_delta_event_delta_tool_call.py (84%) rename src/cohere/{v2 => }/types/chat_tool_call_delta_event_delta_tool_call_function.py (81%) rename src/cohere/{v2 => }/types/chat_tool_call_end_event.py (92%) rename src/cohere/{v2 => }/types/chat_tool_call_start_event.py (93%) rename src/cohere/{v2 => }/types/chat_tool_call_start_event_delta.py (84%) rename src/cohere/{v2 => }/types/chat_tool_call_start_event_delta_tool_call.py (86%) rename src/cohere/{v2 => }/types/chat_tool_call_start_event_delta_tool_call_function.py (82%) rename src/cohere/{v2 => }/types/chat_tool_plan_delta_event.py (92%) rename src/cohere/{v2 => }/types/chat_tool_plan_delta_event_delta.py (81%) rename src/cohere/{v2 => }/types/citation.py (86%) rename src/cohere/{v2 => }/types/citation_end_event.py (91%) create mode 100644 src/cohere/types/citation_options.py create mode 100644 src/cohere/types/citation_options_mode.py rename src/cohere/{v2 => }/types/citation_start_event.py (93%) rename src/cohere/{v2 => }/types/citation_start_event_delta.py (83%) rename 
src/cohere/{v2 => }/types/citation_start_event_delta_message.py (82%) rename src/cohere/{v2 => }/types/content.py (67%) create mode 100644 src/cohere/types/document.py create mode 100644 src/cohere/types/document_content.py rename src/cohere/{v2 => }/types/document_source.py (86%) create mode 100644 src/cohere/types/embed_request_v2.py create mode 100644 src/cohere/types/images.py rename src/cohere/{v2/types/json_response_format2.py => types/json_response_format_v2.py} (86%) rename src/cohere/{v2/types/response_format2.py => types/response_format_v2.py} (86%) rename src/cohere/{v2 => }/types/source.py (77%) rename src/cohere/{v2/types/streamed_chat_response2.py => types/streamed_chat_response_v2.py} (83%) rename src/cohere/{v2 => }/types/system_message.py (83%) rename src/cohere/{v2 => }/types/system_message_content.py (100%) rename src/cohere/{v2 => }/types/system_message_content_item.py (58%) rename src/cohere/{v2 => }/types/text_content.py (81%) rename src/cohere/{v2/types/text_response_format2.py => types/text_response_format_v2.py} (70%) create mode 100644 src/cohere/types/texts.py create mode 100644 src/cohere/types/texts_truncate.py rename src/cohere/{v2/types/tool_call2.py => types/tool_call_v2.py} (67%) rename src/cohere/{v2/types/tool_call2function.py => types/tool_call_v2function.py} (74%) create mode 100644 src/cohere/types/tool_content.py create mode 100644 src/cohere/types/tool_message_v2.py create mode 100644 src/cohere/types/tool_message_v2tool_content.py rename src/cohere/{v2 => }/types/tool_source.py (84%) create mode 100644 src/cohere/types/tool_v2.py rename src/cohere/{v2/types/tool2function.py => types/tool_v2function.py} (84%) rename src/cohere/{v2 => }/types/usage.py (84%) rename src/cohere/{v2 => }/types/usage_billed_units.py (89%) rename src/cohere/{v2 => }/types/usage_tokens.py (86%) rename src/cohere/{v2 => }/types/user_message.py (69%) rename src/cohere/{v2 => }/types/user_message_content.py (100%) delete mode 100644 
src/cohere/v2/types/chat_messages.py delete mode 100644 src/cohere/v2/types/v2chat_request_citation_mode.py create mode 100644 src/cohere/v2/types/v2chat_request_documents_item.py delete mode 100644 src/cohere/v2/types/v2chat_stream_request_citation_mode.py create mode 100644 src/cohere/v2/types/v2chat_stream_request_documents_item.py create mode 100644 src/cohere/v2/types/v2rerank_request_documents_item.py rename src/cohere/v2/types/{tool_message2.py => v2rerank_response.py} (59%) create mode 100644 src/cohere/v2/types/v2rerank_response_results_item.py rename src/cohere/v2/types/{tool2.py => v2rerank_response_results_item_document.py} (66%) diff --git a/poetry.lock b/poetry.lock index 500cafff0..49a511b27 100644 --- a/poetry.lock +++ b/poetry.lock @@ -16,13 +16,13 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anyio" -version = "4.4.0" +version = "4.5.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.5.0-py3-none-any.whl", hash = "sha256:fdeb095b7cc5a5563175eedd926ec4ae55413bb4be5770c424af0ba46ccb4a78"}, + {file = "anyio-4.5.0.tar.gz", hash = "sha256:c5a275fe5ca0afd788001f58fca1e69e29ce706d746e317d660e21f70c530ef9"}, ] [package.dependencies] @@ -32,23 +32,23 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] 
-trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "boto3" -version = "1.35.18" +version = "1.35.22" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.35.18-py3-none-any.whl", hash = "sha256:71e237d3997cf93425947854d7b121c577944f391ba633afb0659e1015364704"}, - {file = "boto3-1.35.18.tar.gz", hash = "sha256:fd130308f1f49d748a5fc63de92de79a995b51c79af3947ddde8815fcf0684fe"}, + {file = "boto3-1.35.22-py3-none-any.whl", hash = "sha256:2109b632b451c1d4347a93a9abe6dc866c03db4ff1f910597f4543f1965829de"}, + {file = "boto3-1.35.22.tar.gz", hash = "sha256:8f4f6e0860ca1b18cbb8d13f3a572a4c099577e741b10205b5604058af0e75b7"}, ] [package.dependencies] -botocore = ">=1.35.18,<1.36.0" +botocore = ">=1.35.22,<1.36.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -57,13 +57,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.35.18" +version = "1.35.22" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.18-py3-none-any.whl", hash = "sha256:1027083aeb1fe74057273410fd768e018e22f85adfbd717b5a69f578f7812b80"}, - {file = "botocore-1.35.18.tar.gz", hash = "sha256:e59da8b91ab06683d2725b6cbbb0383b30c68a241c3c63363f4c5bff59b3c0c0"}, + {file = "botocore-1.35.22-py3-none-any.whl", hash = "sha256:d9bc656e7dde0b3e3f3080fc54bacff6a97fd7806b98acbcc21c7f9d4d0102b9"}, + {file = "botocore-1.35.22.tar.gz", hash = "sha256:18362b7ec748561d786aebf1dd5c9faf22c4732efbf89344314199f96d3bbb65"}, ] [package.dependencies] @@ -260,18 +260,18 @@ zstandard = ["zstandard"] [[package]] name = "filelock" -version = "3.16.0" +version = "3.16.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.16.0-py3-none-any.whl", hash = "sha256:f6ed4c963184f4c84dd5557ce8fece759a3724b37b80c6c4f20a2f63a4dc6609"}, - {file = "filelock-3.16.0.tar.gz", hash = "sha256:81de9eb8453c769b63369f87f11131a7ab04e367f8d97ad39dc230daa07e3bec"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.1.1)", "pytest (>=8.3.2)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.3)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] typing = ["typing-extensions (>=4.12.2)"] [[package]] @@ 
-383,13 +383,13 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.24.7" +version = "0.25.0" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.24.7-py3-none-any.whl", hash = "sha256:a212c555324c8a7b1ffdd07266bb7e7d69ca71aa238d27b7842d65e9a26ac3e5"}, - {file = "huggingface_hub-0.24.7.tar.gz", hash = "sha256:0ad8fb756e2831da0ac0491175b960f341fe06ebcf80ed6f8728313f95fc0207"}, + {file = "huggingface_hub-0.25.0-py3-none-any.whl", hash = "sha256:e2f357b35d72d5012cfd127108c4e14abcd61ba4ebc90a5a374dc2456cb34e12"}, + {file = "huggingface_hub-0.25.0.tar.gz", hash = "sha256:fb5fbe6c12fcd99d187ec7db95db9110fb1a20505f23040a5449a717c1a0db4d"}, ] [package.dependencies] @@ -417,15 +417,18 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t [[package]] name = "idna" -version = "3.8" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, - {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "iniconfig" version = "2.0.0" @@ -547,18 +550,18 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.9.1" +version = "2.9.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = 
"pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, - {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.3" +pydantic-core = "2.23.4" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ -570,100 +573,100 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.23.3" +version = "2.23.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, - 
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, - {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, - {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, - {file = 
"pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, - {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, - {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, - {file = 
"pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, - {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, - {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, - {file = 
"pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, - {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, - {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, - {file = 
"pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, - {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, - {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, - {file = 
"pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, - {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, - {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, - {file = 
"pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, - {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + 
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = 
"pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, ] [package.dependencies] @@ -1047,13 +1050,13 @@ types-urllib3 = "*" [[package]] name = "types-requests" -version = "2.32.0.20240907" +version = "2.32.0.20240914" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240907.tar.gz", hash = "sha256:ff33935f061b5e81ec87997e91050f7b4af4f82027a7a7a9d9aaea04a963fdf8"}, - {file = "types_requests-2.32.0.20240907-py3-none-any.whl", hash = "sha256:1d1e79faeaf9d42def77f3c304893dea17a97cae98168ac69f3cb465516ee8da"}, + {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, + {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index 4230436b8..cd1405841 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "cohere" -version = "5.9.2" +version = "5.9.3" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 9257bd953..c91398c36 100644 --- a/reference.md +++ b/reference.md @@ -12,7 +12,7 @@
Generates a text response to a user message. -To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). +To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
@@ -28,11 +28,11 @@ To learn how to use the Chat API with Streaming and RAG follow our [Text Generat ```python from cohere import ( + ChatbotMessage, ChatConnector, ChatStreamRequestConnectorsSearchOptions, Client, - Message_Chatbot, - ResponseFormat_Text, + TextResponseFormat, Tool, ToolCall, ToolParameterDefinitionsValue, @@ -48,7 +48,7 @@ response = client.chat_stream( model="string", preamble="string", chat_history=[ - Message_Chatbot( + ChatbotMessage( message="string", tool_calls=[ ToolCall( @@ -108,7 +108,7 @@ response = client.chat_stream( ) ], force_single_step=True, - response_format=ResponseFormat_Text(), + response_format=TextResponseFormat(), safety_mode="CONTEXTUAL", ) for chunk in response: @@ -571,7 +571,7 @@ Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private D
Generates a text response to a user message. -To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). +To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
@@ -586,7 +586,7 @@ To learn how to use the Chat API with Streaming and RAG follow our [Text Generat
```python -from cohere import Client, Message_Tool +from cohere import Client, ToolMessage client = Client( client_name="YOUR_CLIENT_NAME", @@ -594,7 +594,7 @@ client = Client( ) client.chat( message="Can you give me a global market overview of solar panels?", - chat_history=[Message_Tool(), Message_Tool()], + chat_history=[ToolMessage(), ToolMessage()], prompt_truncation="OFF", temperature=0.3, ) @@ -2334,12 +2334,13 @@ Generates a message from the model in response to a provided conversation. To le
```python -from cohere import Client -from cohere.v2 import ( - ChatMessage2_User, - ResponseFormat2_Text, - Tool2, - Tool2Function, +from cohere import ( + CitationOptions, + Client, + TextResponseFormatV2, + ToolV2, + ToolV2Function, + UserChatMessageV2, ) client = Client( @@ -2349,22 +2350,24 @@ client = Client( response = client.v2.chat_stream( model="string", messages=[ - ChatMessage2_User( + UserChatMessageV2( content="string", - documents=[{"string": {"key": "value"}}], ) ], tools=[ - Tool2( - function=Tool2Function( + ToolV2( + function=ToolV2Function( name="string", description="string", parameters={"string": {"key": "value"}}, ), ) ], - citation_mode="FAST", - response_format=ResponseFormat2_Text(), + documents=["string"], + citation_options=CitationOptions( + mode="FAST", + ), + response_format=TextResponseFormatV2(), safety_mode="CONTEXTUAL", max_tokens=1, stop_sequences=["string"], @@ -2409,7 +2412,7 @@ for chunk in response:
-**tools:** `typing.Optional[typing.Sequence[Tool2]]` +**tools:** `typing.Optional[typing.Sequence[ToolV2]]` A list of available tools (functions) that the model may suggest invoking before producing a text response. @@ -2422,11 +2425,16 @@ When `tools` is passed (without `tool_results`), the `text` content in the respo
-**citation_mode:** `typing.Optional[V2ChatStreamRequestCitationMode]` +**documents:** `typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. -Defaults to `"accurate"`. -Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + +
+
+
+
+ +**citation_options:** `typing.Optional[CitationOptions]`
@@ -2434,7 +2442,7 @@ Dictates the approach taken to generating citations as part of the RAG flow by a
-**response_format:** `typing.Optional[ResponseFormat2]` +**response_format:** `typing.Optional[ResponseFormatV2]`
@@ -2602,8 +2610,7 @@ Generates a message from the model in response to a provided conversation. To le
```python -from cohere import Client -from cohere.v2 import ChatMessage2_Tool +from cohere import Client, ToolChatMessageV2 client = Client( client_name="YOUR_CLIENT_NAME", @@ -2612,9 +2619,9 @@ client = Client( client.v2.chat( model="model", messages=[ - ChatMessage2_Tool( + ToolChatMessageV2( tool_call_id="messages", - tool_content=["messages"], + tool_content="messages", ) ], ) @@ -2649,7 +2656,7 @@ client.v2.chat(
-**tools:** `typing.Optional[typing.Sequence[Tool2]]` +**tools:** `typing.Optional[typing.Sequence[ToolV2]]` A list of available tools (functions) that the model may suggest invoking before producing a text response. @@ -2662,11 +2669,16 @@ When `tools` is passed (without `tool_results`), the `text` content in the respo
-**citation_mode:** `typing.Optional[V2ChatRequestCitationMode]` +**documents:** `typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]]` — A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. -Defaults to `"accurate"`. -Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + +
+
+ +
+
+**citation_options:** `typing.Optional[CitationOptions]`
@@ -2674,7 +2686,7 @@ Dictates the approach taken to generating citations as part of the RAG flow by a
-**response_format:** `typing.Optional[ResponseFormat2]` +**response_format:** `typing.Optional[ResponseFormatV2]`
@@ -2811,6 +2823,215 @@ Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
+
+
+ + +
client.v2.embed(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. + +Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. + +If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client, ImageEmbedRequestV2 + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.v2.embed( + request=ImageEmbedRequestV2( + images=["string"], + model="string", + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `EmbedRequestV2` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.v2.rerank(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from cohere import Client + +client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", +) +client.v2.rerank( + model="model", + query="query", + documents=["documents"], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` — The identifier of the model to use, one of : `rerank-english-v3.0`, `rerank-multilingual-v3.0`, `rerank-english-v2.0`, `rerank-multilingual-v2.0` + +
+
+ +
+
+ +**query:** `str` — The search query + +
+
+ +
+
+ +**documents:** `typing.Sequence[V2RerankRequestDocumentsItem]` + +A list of document objects or strings to rerank. +If a document is provided the text fields is required and all other fields will be preserved in the response. + +The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000. + +We recommend a maximum of 1,000 documents for optimal endpoint performance. + +
+
+ +
+
+ +**top_n:** `typing.Optional[int]` — The number of most relevant documents or indices to return, defaults to the length of the documents + +
+
+ +
+
+ +**rank_fields:** `typing.Optional[typing.Sequence[str]]` — If a JSON object is provided, you can specify which keys you would like to have considered for reranking. The model will rerank based on order of the fields passed in (i.e. rank_fields=['title','author','text'] will rerank using the values in title, author, text sequentially. If the length of title, author, and text exceeds the context length of the model, the chunking will not re-consider earlier fields). If not provided, the model will use the default text field for ranking. + +
+
+ +
+
+ +**return_documents:** `typing.Optional[bool]` + +- If false, returns results without the doc text - the api will return a list of {index, relevance score} where index is inferred from the list passed into the request. +- If true, returns results with the doc text passed in - the api will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request. + +
+
+ +
+
+ +**max_chunks_per_doc:** `typing.Optional[int]` — The maximum number of chunks to produce internally from a document + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ +
diff --git a/src/cohere/__init__.py b/src/cohere/__init__.py index adad8ada0..dd374d7d6 100644 --- a/src/cohere/__init__.py +++ b/src/cohere/__init__.py @@ -5,17 +5,41 @@ ApiMetaApiVersion, ApiMetaBilledUnits, ApiMetaTokens, + AssistantChatMessageV2, + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentItem, + AssistantMessageResponse, + AssistantMessageResponseContentItem, AuthTokenType, ChatCitation, ChatCitationGenerationEvent, ChatConnector, + ChatContentDeltaEvent, + ChatContentDeltaEventDelta, + ChatContentDeltaEventDeltaMessage, + ChatContentDeltaEventDeltaMessageContent, + ChatContentEndEvent, + ChatContentStartEvent, + ChatContentStartEventDelta, + ChatContentStartEventDeltaMessage, + ChatContentStartEventDeltaMessageContent, ChatDataMetrics, ChatDocument, + ChatFinishReason, ChatMessage, + ChatMessageEndEvent, + ChatMessageEndEventDelta, + ChatMessageStartEvent, + ChatMessageStartEventDelta, + ChatMessageStartEventDeltaMessage, + ChatMessageV2, + ChatMessages, ChatRequestCitationQuality, ChatRequestConnectorsSearchOptions, ChatRequestPromptTruncation, ChatRequestSafetyMode, + ChatResponse, ChatSearchQueriesGenerationEvent, ChatSearchQuery, ChatSearchResult, @@ -24,15 +48,39 @@ ChatStreamEndEvent, ChatStreamEndEventFinishReason, ChatStreamEvent, + ChatStreamEventType, ChatStreamRequestCitationQuality, ChatStreamRequestConnectorsSearchOptions, ChatStreamRequestPromptTruncation, ChatStreamRequestSafetyMode, ChatStreamStartEvent, ChatTextGenerationEvent, + ChatToolCallDeltaEvent, + ChatToolCallDeltaEventDelta, + ChatToolCallDeltaEventDeltaToolCall, + ChatToolCallDeltaEventDeltaToolCallFunction, + ChatToolCallEndEvent, + ChatToolCallStartEvent, + ChatToolCallStartEventDelta, + ChatToolCallStartEventDeltaToolCall, + ChatToolCallStartEventDeltaToolCallFunction, ChatToolCallsChunkEvent, ChatToolCallsGenerationEvent, + ChatToolPlanDeltaEvent, + ChatToolPlanDeltaEventDelta, + ChatbotMessage, CheckApiKeyResponse, + Citation, + CitationEndEvent, + 
CitationEndStreamedChatResponseV2, + CitationGenerationStreamedChatResponse, + CitationOptions, + CitationOptionsMode, + CitationStartEvent, + CitationStartEventDelta, + CitationStartEventDeltaMessage, + CitationStartStreamedChatResponseV2, + ClassificationEmbedRequestV2, ClassifyDataMetrics, ClassifyExample, ClassifyRequestTruncate, @@ -41,10 +89,15 @@ ClassifyResponseClassificationsItemClassificationType, ClassifyResponseClassificationsItemLabelsValue, ClientClosedRequestErrorBody, + ClusteringEmbedRequestV2, CompatibleEndpoint, Connector, ConnectorAuthStatus, ConnectorOAuth, + Content, + ContentDeltaStreamedChatResponseV2, + ContentEndStreamedChatResponseV2, + ContentStartStreamedChatResponseV2, CreateConnectorOAuth, CreateConnectorResponse, CreateConnectorServiceAuth, @@ -55,6 +108,10 @@ DatasetValidationStatus, DeleteConnectorResponse, DetokenizeResponse, + Document, + DocumentContent, + DocumentSource, + DocumentToolContent, EmbedByTypeResponse, EmbedByTypeResponseEmbeddings, EmbedFloatsResponse, @@ -63,10 +120,11 @@ EmbedJobStatus, EmbedJobTruncate, EmbedRequestTruncate, + EmbedRequestV2, EmbedResponse, - EmbedResponse_EmbeddingsByType, - EmbedResponse_EmbeddingsFloats, EmbeddingType, + EmbeddingsByTypeEmbedResponse, + EmbeddingsFloatsEmbedResponse, FinetuneDatasetMetrics, FinishReason, GatewayTimeoutErrorBody, @@ -80,22 +138,22 @@ GenerateStreamRequestTruncate, GenerateStreamText, GenerateStreamedResponse, - GenerateStreamedResponse_StreamEnd, - GenerateStreamedResponse_StreamError, - GenerateStreamedResponse_TextGeneration, Generation, GetConnectorResponse, GetModelResponse, + ImageEmbedRequestV2, + Images, + JsonObjectResponseFormat, + JsonObjectResponseFormatV2, JsonResponseFormat, + JsonResponseFormatV2, LabelMetric, ListConnectorsResponse, ListEmbedJobResponse, ListModelsResponse, Message, - Message_Chatbot, - Message_System, - Message_Tool, - Message_User, + MessageEndStreamedChatResponseV2, + MessageStartStreamedChatResponseV2, Metrics, 
MetricsEmbedData, MetricsEmbedDataFieldsItem, @@ -110,35 +168,71 @@ RerankResponseResultsItemDocument, RerankerDataMetrics, ResponseFormat, - ResponseFormat_JsonObject, - ResponseFormat_Text, + ResponseFormatV2, + SearchDocumentEmbedRequestV2, + SearchQueriesGenerationStreamedChatResponse, + SearchQueryEmbedRequestV2, + SearchResultsStreamedChatResponse, SingleGeneration, SingleGenerationInStream, SingleGenerationTokenLikelihoodsItem, + Source, + StreamEndGenerateStreamedResponse, + StreamEndStreamedChatResponse, + StreamErrorGenerateStreamedResponse, + StreamStartStreamedChatResponse, StreamedChatResponse, - StreamedChatResponse_CitationGeneration, - StreamedChatResponse_SearchQueriesGeneration, - StreamedChatResponse_SearchResults, - StreamedChatResponse_StreamEnd, - StreamedChatResponse_StreamStart, - StreamedChatResponse_TextGeneration, - StreamedChatResponse_ToolCallsChunk, - StreamedChatResponse_ToolCallsGeneration, + StreamedChatResponseV2, SummarizeRequestExtractiveness, SummarizeRequestFormat, SummarizeRequestLength, SummarizeResponse, + SystemChatMessageV2, + SystemMessage, + SystemMessageContent, + SystemMessageContentItem, + TextAssistantMessageContentItem, + TextAssistantMessageResponseContentItem, + TextContent, + TextGenerationGenerateStreamedResponse, + TextGenerationStreamedChatResponse, TextResponseFormat, + TextResponseFormatV2, + TextSystemMessageContentItem, + TextToolContent, + Texts, + TextsTruncate, TokenizeResponse, TooManyRequestsErrorBody, Tool, ToolCall, ToolCallDelta, + ToolCallDeltaStreamedChatResponseV2, + ToolCallEndStreamedChatResponseV2, + ToolCallStartStreamedChatResponseV2, + ToolCallV2, + ToolCallV2Function, + ToolCallsChunkStreamedChatResponse, + ToolCallsGenerationStreamedChatResponse, + ToolChatMessageV2, + ToolContent, ToolMessage, + ToolMessageV2, + ToolMessageV2ToolContent, ToolParameterDefinitionsValue, + ToolPlanDeltaStreamedChatResponseV2, ToolResult, + ToolSource, + ToolV2, + ToolV2Function, 
UnprocessableEntityErrorBody, UpdateConnectorResponse, + Usage, + UsageBilledUnits, + UsageTokens, + UserChatMessageV2, + UserMessage, + UserMessageContent, ) from .errors import ( BadRequestError, @@ -169,95 +263,14 @@ from .environment import ClientEnvironment from .sagemaker_client import SagemakerClient from .v2 import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentItem, - AssistantMessageContentItem_Text, - AssistantMessageResponse, - AssistantMessageResponseContentItem, - AssistantMessageResponseContentItem_Text, - ChatContentDeltaEvent, - ChatContentDeltaEventDelta, - ChatContentDeltaEventDeltaMessage, - ChatContentDeltaEventDeltaMessageContent, - ChatContentEndEvent, - ChatContentStartEvent, - ChatContentStartEventDelta, - ChatContentStartEventDeltaMessage, - ChatContentStartEventDeltaMessageContent, - ChatFinishReason, - ChatMessage2, - ChatMessage2_Assistant, - ChatMessage2_System, - ChatMessage2_Tool, - ChatMessage2_User, - ChatMessageEndEvent, - ChatMessageEndEventDelta, - ChatMessageStartEvent, - ChatMessageStartEventDelta, - ChatMessageStartEventDeltaMessage, - ChatMessages, - ChatStreamEventType, - ChatToolCallDeltaEvent, - ChatToolCallDeltaEventDelta, - ChatToolCallDeltaEventDeltaToolCall, - ChatToolCallDeltaEventDeltaToolCallFunction, - ChatToolCallEndEvent, - ChatToolCallStartEvent, - ChatToolCallStartEventDelta, - ChatToolCallStartEventDeltaToolCall, - ChatToolCallStartEventDeltaToolCallFunction, - ChatToolPlanDeltaEvent, - ChatToolPlanDeltaEventDelta, - Citation, - CitationEndEvent, - CitationStartEvent, - CitationStartEventDelta, - CitationStartEventDeltaMessage, - Content, - Content_Text, - DocumentSource, - JsonResponseFormat2, - NonStreamedChatResponse2, - ResponseFormat2, - ResponseFormat2_JsonObject, - ResponseFormat2_Text, - Source, - Source_Document, - Source_Tool, - StreamedChatResponse2, - StreamedChatResponse2_CitationEnd, - StreamedChatResponse2_CitationStart, - StreamedChatResponse2_ContentDelta, - 
StreamedChatResponse2_ContentEnd, - StreamedChatResponse2_ContentStart, - StreamedChatResponse2_MessageEnd, - StreamedChatResponse2_MessageStart, - StreamedChatResponse2_ToolCallDelta, - StreamedChatResponse2_ToolCallEnd, - StreamedChatResponse2_ToolCallStart, - StreamedChatResponse2_ToolPlanDelta, - SystemMessage, - SystemMessageContent, - SystemMessageContentItem, - SystemMessageContentItem_Text, - TextContent, - TextResponseFormat2, - Tool2, - Tool2Function, - ToolCall2, - ToolCall2Function, - ToolMessage2, - ToolSource, - Usage, - UsageBilledUnits, - UsageTokens, - UserMessage, - UserMessageContent, - V2ChatRequestCitationMode, + V2ChatRequestDocumentsItem, V2ChatRequestSafetyMode, - V2ChatStreamRequestCitationMode, + V2ChatStreamRequestDocumentsItem, V2ChatStreamRequestSafetyMode, + V2RerankRequestDocumentsItem, + V2RerankResponse, + V2RerankResponseResultsItem, + V2RerankResponseResultsItemDocument, ) from .version import __version__ @@ -266,13 +279,12 @@ "ApiMetaApiVersion", "ApiMetaBilledUnits", "ApiMetaTokens", + "AssistantChatMessageV2", "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentItem", - "AssistantMessageContentItem_Text", "AssistantMessageResponse", "AssistantMessageResponseContentItem", - "AssistantMessageResponseContentItem_Text", "AsyncClient", "AsyncClientV2", "AuthTokenType", @@ -295,21 +307,18 @@ "ChatDocument", "ChatFinishReason", "ChatMessage", - "ChatMessage2", - "ChatMessage2_Assistant", - "ChatMessage2_System", - "ChatMessage2_Tool", - "ChatMessage2_User", "ChatMessageEndEvent", "ChatMessageEndEventDelta", "ChatMessageStartEvent", "ChatMessageStartEventDelta", "ChatMessageStartEventDeltaMessage", + "ChatMessageV2", "ChatMessages", "ChatRequestCitationQuality", "ChatRequestConnectorsSearchOptions", "ChatRequestPromptTruncation", "ChatRequestSafetyMode", + "ChatResponse", "ChatSearchQueriesGenerationEvent", "ChatSearchQuery", "ChatSearchResult", @@ -338,12 +347,19 @@ "ChatToolCallsGenerationEvent", 
"ChatToolPlanDeltaEvent", "ChatToolPlanDeltaEventDelta", + "ChatbotMessage", "CheckApiKeyResponse", "Citation", "CitationEndEvent", + "CitationEndStreamedChatResponseV2", + "CitationGenerationStreamedChatResponse", + "CitationOptions", + "CitationOptionsMode", "CitationStartEvent", "CitationStartEventDelta", "CitationStartEventDeltaMessage", + "CitationStartStreamedChatResponseV2", + "ClassificationEmbedRequestV2", "ClassifyDataMetrics", "ClassifyExample", "ClassifyRequestTruncate", @@ -356,12 +372,15 @@ "ClientClosedRequestErrorBody", "ClientEnvironment", "ClientV2", + "ClusteringEmbedRequestV2", "CompatibleEndpoint", "Connector", "ConnectorAuthStatus", "ConnectorOAuth", "Content", - "Content_Text", + "ContentDeltaStreamedChatResponseV2", + "ContentEndStreamedChatResponseV2", + "ContentStartStreamedChatResponseV2", "CreateConnectorOAuth", "CreateConnectorResponse", "CreateConnectorServiceAuth", @@ -378,7 +397,10 @@ "DatasetsListResponse", "DeleteConnectorResponse", "DetokenizeResponse", + "Document", + "DocumentContent", "DocumentSource", + "DocumentToolContent", "EmbedByTypeResponse", "EmbedByTypeResponseEmbeddings", "EmbedFloatsResponse", @@ -387,10 +409,11 @@ "EmbedJobStatus", "EmbedJobTruncate", "EmbedRequestTruncate", + "EmbedRequestV2", "EmbedResponse", - "EmbedResponse_EmbeddingsByType", - "EmbedResponse_EmbeddingsFloats", "EmbeddingType", + "EmbeddingsByTypeEmbedResponse", + "EmbeddingsFloatsEmbedResponse", "FinetuneDatasetMetrics", "FinishReason", "ForbiddenError", @@ -406,29 +429,27 @@ "GenerateStreamRequestTruncate", "GenerateStreamText", "GenerateStreamedResponse", - "GenerateStreamedResponse_StreamEnd", - "GenerateStreamedResponse_StreamError", - "GenerateStreamedResponse_TextGeneration", "Generation", "GetConnectorResponse", "GetModelResponse", + "ImageEmbedRequestV2", + "Images", "InternalServerError", + "JsonObjectResponseFormat", + "JsonObjectResponseFormatV2", "JsonResponseFormat", - "JsonResponseFormat2", + "JsonResponseFormatV2", "LabelMetric", 
"ListConnectorsResponse", "ListEmbedJobResponse", "ListModelsResponse", "Message", - "Message_Chatbot", - "Message_System", - "Message_Tool", - "Message_User", + "MessageEndStreamedChatResponseV2", + "MessageStartStreamedChatResponseV2", "Metrics", "MetricsEmbedData", "MetricsEmbedDataFieldsItem", "NonStreamedChatResponse", - "NonStreamedChatResponse2", "NotFoundError", "NotImplementedError", "NotImplementedErrorBody", @@ -441,66 +462,66 @@ "RerankResponseResultsItemDocument", "RerankerDataMetrics", "ResponseFormat", - "ResponseFormat2", - "ResponseFormat2_JsonObject", - "ResponseFormat2_Text", - "ResponseFormat_JsonObject", - "ResponseFormat_Text", + "ResponseFormatV2", "SagemakerClient", + "SearchDocumentEmbedRequestV2", + "SearchQueriesGenerationStreamedChatResponse", + "SearchQueryEmbedRequestV2", + "SearchResultsStreamedChatResponse", "ServiceUnavailableError", "SingleGeneration", "SingleGenerationInStream", "SingleGenerationTokenLikelihoodsItem", "Source", - "Source_Document", - "Source_Tool", + "StreamEndGenerateStreamedResponse", + "StreamEndStreamedChatResponse", + "StreamErrorGenerateStreamedResponse", + "StreamStartStreamedChatResponse", "StreamedChatResponse", - "StreamedChatResponse2", - "StreamedChatResponse2_CitationEnd", - "StreamedChatResponse2_CitationStart", - "StreamedChatResponse2_ContentDelta", - "StreamedChatResponse2_ContentEnd", - "StreamedChatResponse2_ContentStart", - "StreamedChatResponse2_MessageEnd", - "StreamedChatResponse2_MessageStart", - "StreamedChatResponse2_ToolCallDelta", - "StreamedChatResponse2_ToolCallEnd", - "StreamedChatResponse2_ToolCallStart", - "StreamedChatResponse2_ToolPlanDelta", - "StreamedChatResponse_CitationGeneration", - "StreamedChatResponse_SearchQueriesGeneration", - "StreamedChatResponse_SearchResults", - "StreamedChatResponse_StreamEnd", - "StreamedChatResponse_StreamStart", - "StreamedChatResponse_TextGeneration", - "StreamedChatResponse_ToolCallsChunk", - "StreamedChatResponse_ToolCallsGeneration", + 
"StreamedChatResponseV2", "SummarizeRequestExtractiveness", "SummarizeRequestFormat", "SummarizeRequestLength", "SummarizeResponse", + "SystemChatMessageV2", "SystemMessage", "SystemMessageContent", "SystemMessageContentItem", - "SystemMessageContentItem_Text", + "TextAssistantMessageContentItem", + "TextAssistantMessageResponseContentItem", "TextContent", + "TextGenerationGenerateStreamedResponse", + "TextGenerationStreamedChatResponse", "TextResponseFormat", - "TextResponseFormat2", + "TextResponseFormatV2", + "TextSystemMessageContentItem", + "TextToolContent", + "Texts", + "TextsTruncate", "TokenizeResponse", "TooManyRequestsError", "TooManyRequestsErrorBody", "Tool", - "Tool2", - "Tool2Function", "ToolCall", - "ToolCall2", - "ToolCall2Function", "ToolCallDelta", + "ToolCallDeltaStreamedChatResponseV2", + "ToolCallEndStreamedChatResponseV2", + "ToolCallStartStreamedChatResponseV2", + "ToolCallV2", + "ToolCallV2Function", + "ToolCallsChunkStreamedChatResponse", + "ToolCallsGenerationStreamedChatResponse", + "ToolChatMessageV2", + "ToolContent", "ToolMessage", - "ToolMessage2", + "ToolMessageV2", + "ToolMessageV2ToolContent", "ToolParameterDefinitionsValue", + "ToolPlanDeltaStreamedChatResponseV2", "ToolResult", "ToolSource", + "ToolV2", + "ToolV2Function", "UnauthorizedError", "UnprocessableEntityError", "UnprocessableEntityErrorBody", @@ -508,12 +529,17 @@ "Usage", "UsageBilledUnits", "UsageTokens", + "UserChatMessageV2", "UserMessage", "UserMessageContent", - "V2ChatRequestCitationMode", + "V2ChatRequestDocumentsItem", "V2ChatRequestSafetyMode", - "V2ChatStreamRequestCitationMode", + "V2ChatStreamRequestDocumentsItem", "V2ChatStreamRequestSafetyMode", + "V2RerankRequestDocumentsItem", + "V2RerankResponse", + "V2RerankResponseResultsItem", + "V2RerankResponseResultsItemDocument", "__version__", "connectors", "datasets", diff --git a/src/cohere/base_client.py b/src/cohere/base_client.py index 910b0dafb..24039b71c 100644 --- a/src/cohere/base_client.py +++ 
b/src/cohere/base_client.py @@ -186,7 +186,7 @@ def chat_stream( ) -> typing.Iterator[StreamedChatResponse]: """ Generates a text response to a user message. - To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). + To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). Parameters ---------- @@ -437,11 +437,11 @@ def chat_stream( Examples -------- from cohere import ( + ChatbotMessage, ChatConnector, ChatStreamRequestConnectorsSearchOptions, Client, - Message_Chatbot, - ResponseFormat_Text, + TextResponseFormat, Tool, ToolCall, ToolParameterDefinitionsValue, @@ -457,7 +457,7 @@ def chat_stream( model="string", preamble="string", chat_history=[ - Message_Chatbot( + ChatbotMessage( message="string", tool_calls=[ ToolCall( @@ -517,7 +517,7 @@ def chat_stream( ) ], force_single_step=True, - response_format=ResponseFormat_Text(), + response_format=TextResponseFormat(), safety_mode="CONTEXTUAL", ) for chunk in response: @@ -737,7 +737,7 @@ def chat( ) -> NonStreamedChatResponse: """ Generates a text response to a user message. - To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). + To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). 
Parameters ---------- @@ -987,7 +987,7 @@ def chat( Examples -------- - from cohere import Client, Message_Tool + from cohere import Client, ToolMessage client = Client( client_name="YOUR_CLIENT_NAME", @@ -995,7 +995,7 @@ def chat( ) client.chat( message="Can you give me a global market overview of solar panels?", - chat_history=[Message_Tool(), Message_Tool()], + chat_history=[ToolMessage(), ToolMessage()], prompt_truncation="OFF", temperature=0.3, ) @@ -3181,7 +3181,7 @@ async def chat_stream( ) -> typing.AsyncIterator[StreamedChatResponse]: """ Generates a text response to a user message. - To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). + To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). Parameters ---------- @@ -3435,10 +3435,10 @@ async def chat_stream( from cohere import ( AsyncClient, + ChatbotMessage, ChatConnector, ChatStreamRequestConnectorsSearchOptions, - Message_Chatbot, - ResponseFormat_Text, + TextResponseFormat, Tool, ToolCall, ToolParameterDefinitionsValue, @@ -3457,7 +3457,7 @@ async def main() -> None: model="string", preamble="string", chat_history=[ - Message_Chatbot( + ChatbotMessage( message="string", tool_calls=[ ToolCall( @@ -3517,7 +3517,7 @@ async def main() -> None: ) ], force_single_step=True, - response_format=ResponseFormat_Text(), + response_format=TextResponseFormat(), safety_mode="CONTEXTUAL", ) async for chunk in response: @@ -3740,7 +3740,7 @@ async def chat( ) -> NonStreamedChatResponse: """ Generates a text response to a user message. - To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). + To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). 
Parameters ---------- @@ -3992,7 +3992,7 @@ async def chat( -------- import asyncio - from cohere import AsyncClient, Message_Tool + from cohere import AsyncClient, ToolMessage client = AsyncClient( client_name="YOUR_CLIENT_NAME", @@ -4003,7 +4003,7 @@ async def chat( async def main() -> None: await client.chat( message="Can you give me a global market overview of solar panels?", - chat_history=[Message_Tool(), Message_Tool()], + chat_history=[ToolMessage(), ToolMessage()], prompt_truncation="OFF", temperature=0.3, ) diff --git a/src/cohere/core/client_wrapper.py b/src/cohere/core/client_wrapper.py index 54cd9787c..10e94aea9 100644 --- a/src/cohere/core/client_wrapper.py +++ b/src/cohere/core/client_wrapper.py @@ -24,7 +24,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "cohere", - "X-Fern-SDK-Version": "5.9.2", + "X-Fern-SDK-Version": "5.9.3", } if self._client_name is not None: headers["X-Client-Name"] = self._client_name diff --git a/src/cohere/types/__init__.py b/src/cohere/types/__init__.py index ea6d61264..106547ce1 100644 --- a/src/cohere/types/__init__.py +++ b/src/cohere/types/__init__.py @@ -4,17 +4,49 @@ from .api_meta_api_version import ApiMetaApiVersion from .api_meta_billed_units import ApiMetaBilledUnits from .api_meta_tokens import ApiMetaTokens +from .assistant_message import AssistantMessage +from .assistant_message_content import AssistantMessageContent +from .assistant_message_content_item import AssistantMessageContentItem, TextAssistantMessageContentItem +from .assistant_message_response import AssistantMessageResponse +from .assistant_message_response_content_item import ( + AssistantMessageResponseContentItem, + TextAssistantMessageResponseContentItem, +) from .auth_token_type import AuthTokenType from .chat_citation import ChatCitation from .chat_citation_generation_event import ChatCitationGenerationEvent from .chat_connector import ChatConnector +from 
.chat_content_delta_event import ChatContentDeltaEvent +from .chat_content_delta_event_delta import ChatContentDeltaEventDelta +from .chat_content_delta_event_delta_message import ChatContentDeltaEventDeltaMessage +from .chat_content_delta_event_delta_message_content import ChatContentDeltaEventDeltaMessageContent +from .chat_content_end_event import ChatContentEndEvent +from .chat_content_start_event import ChatContentStartEvent +from .chat_content_start_event_delta import ChatContentStartEventDelta +from .chat_content_start_event_delta_message import ChatContentStartEventDeltaMessage +from .chat_content_start_event_delta_message_content import ChatContentStartEventDeltaMessageContent from .chat_data_metrics import ChatDataMetrics from .chat_document import ChatDocument +from .chat_finish_reason import ChatFinishReason from .chat_message import ChatMessage +from .chat_message_end_event import ChatMessageEndEvent +from .chat_message_end_event_delta import ChatMessageEndEventDelta +from .chat_message_start_event import ChatMessageStartEvent +from .chat_message_start_event_delta import ChatMessageStartEventDelta +from .chat_message_start_event_delta_message import ChatMessageStartEventDeltaMessage +from .chat_message_v2 import ( + AssistantChatMessageV2, + ChatMessageV2, + SystemChatMessageV2, + ToolChatMessageV2, + UserChatMessageV2, +) +from .chat_messages import ChatMessages from .chat_request_citation_quality import ChatRequestCitationQuality from .chat_request_connectors_search_options import ChatRequestConnectorsSearchOptions from .chat_request_prompt_truncation import ChatRequestPromptTruncation from .chat_request_safety_mode import ChatRequestSafetyMode +from .chat_response import ChatResponse from .chat_search_queries_generation_event import ChatSearchQueriesGenerationEvent from .chat_search_query import ChatSearchQuery from .chat_search_result import ChatSearchResult @@ -23,15 +55,34 @@ from .chat_stream_end_event import ChatStreamEndEvent from 
.chat_stream_end_event_finish_reason import ChatStreamEndEventFinishReason from .chat_stream_event import ChatStreamEvent +from .chat_stream_event_type import ChatStreamEventType from .chat_stream_request_citation_quality import ChatStreamRequestCitationQuality from .chat_stream_request_connectors_search_options import ChatStreamRequestConnectorsSearchOptions from .chat_stream_request_prompt_truncation import ChatStreamRequestPromptTruncation from .chat_stream_request_safety_mode import ChatStreamRequestSafetyMode from .chat_stream_start_event import ChatStreamStartEvent from .chat_text_generation_event import ChatTextGenerationEvent +from .chat_tool_call_delta_event import ChatToolCallDeltaEvent +from .chat_tool_call_delta_event_delta import ChatToolCallDeltaEventDelta +from .chat_tool_call_delta_event_delta_tool_call import ChatToolCallDeltaEventDeltaToolCall +from .chat_tool_call_delta_event_delta_tool_call_function import ChatToolCallDeltaEventDeltaToolCallFunction +from .chat_tool_call_end_event import ChatToolCallEndEvent +from .chat_tool_call_start_event import ChatToolCallStartEvent +from .chat_tool_call_start_event_delta import ChatToolCallStartEventDelta +from .chat_tool_call_start_event_delta_tool_call import ChatToolCallStartEventDeltaToolCall +from .chat_tool_call_start_event_delta_tool_call_function import ChatToolCallStartEventDeltaToolCallFunction from .chat_tool_calls_chunk_event import ChatToolCallsChunkEvent from .chat_tool_calls_generation_event import ChatToolCallsGenerationEvent +from .chat_tool_plan_delta_event import ChatToolPlanDeltaEvent +from .chat_tool_plan_delta_event_delta import ChatToolPlanDeltaEventDelta from .check_api_key_response import CheckApiKeyResponse +from .citation import Citation +from .citation_end_event import CitationEndEvent +from .citation_options import CitationOptions +from .citation_options_mode import CitationOptionsMode +from .citation_start_event import CitationStartEvent +from .citation_start_event_delta 
import CitationStartEventDelta +from .citation_start_event_delta_message import CitationStartEventDeltaMessage from .classify_data_metrics import ClassifyDataMetrics from .classify_example import ClassifyExample from .classify_request_truncate import ClassifyRequestTruncate @@ -46,6 +97,7 @@ from .connector import Connector from .connector_auth_status import ConnectorAuthStatus from .connector_o_auth import ConnectorOAuth +from .content import Content, TextContent from .create_connector_o_auth import CreateConnectorOAuth from .create_connector_response import CreateConnectorResponse from .create_connector_service_auth import CreateConnectorServiceAuth @@ -56,6 +108,8 @@ from .dataset_validation_status import DatasetValidationStatus from .delete_connector_response import DeleteConnectorResponse from .detokenize_response import DetokenizeResponse +from .document import Document +from .document_content import DocumentContent from .embed_by_type_response import EmbedByTypeResponse from .embed_by_type_response_embeddings import EmbedByTypeResponseEmbeddings from .embed_floats_response import EmbedFloatsResponse @@ -64,7 +118,15 @@ from .embed_job_status import EmbedJobStatus from .embed_job_truncate import EmbedJobTruncate from .embed_request_truncate import EmbedRequestTruncate -from .embed_response import EmbedResponse, EmbedResponse_EmbeddingsByType, EmbedResponse_EmbeddingsFloats +from .embed_request_v2 import ( + ClassificationEmbedRequestV2, + ClusteringEmbedRequestV2, + EmbedRequestV2, + ImageEmbedRequestV2, + SearchDocumentEmbedRequestV2, + SearchQueryEmbedRequestV2, +) +from .embed_response import EmbedResponse, EmbeddingsByTypeEmbedResponse, EmbeddingsFloatsEmbedResponse from .embedding_type import EmbeddingType from .finetune_dataset_metrics import FinetuneDatasetMetrics from .finish_reason import FinishReason @@ -80,19 +142,21 @@ from .generate_stream_text import GenerateStreamText from .generate_streamed_response import ( GenerateStreamedResponse, - 
GenerateStreamedResponse_StreamEnd, - GenerateStreamedResponse_StreamError, - GenerateStreamedResponse_TextGeneration, + StreamEndGenerateStreamedResponse, + StreamErrorGenerateStreamedResponse, + TextGenerationGenerateStreamedResponse, ) from .generation import Generation from .get_connector_response import GetConnectorResponse from .get_model_response import GetModelResponse +from .images import Images from .json_response_format import JsonResponseFormat +from .json_response_format_v2 import JsonResponseFormatV2 from .label_metric import LabelMetric from .list_connectors_response import ListConnectorsResponse from .list_embed_job_response import ListEmbedJobResponse from .list_models_response import ListModelsResponse -from .message import Message, Message_Chatbot, Message_System, Message_Tool, Message_User +from .message import ChatbotMessage, Message, SystemMessage, ToolMessage, UserMessage from .metrics import Metrics from .metrics_embed_data import MetricsEmbedData from .metrics_embed_data_fields_item import MetricsEmbedDataFieldsItem @@ -106,53 +170,106 @@ from .rerank_response_results_item import RerankResponseResultsItem from .rerank_response_results_item_document import RerankResponseResultsItemDocument from .reranker_data_metrics import RerankerDataMetrics -from .response_format import ResponseFormat, ResponseFormat_JsonObject, ResponseFormat_Text +from .response_format import JsonObjectResponseFormat, ResponseFormat, TextResponseFormat +from .response_format_v2 import JsonObjectResponseFormatV2, ResponseFormatV2, TextResponseFormatV2 from .single_generation import SingleGeneration from .single_generation_in_stream import SingleGenerationInStream from .single_generation_token_likelihoods_item import SingleGenerationTokenLikelihoodsItem +from .source import DocumentSource, Source, ToolSource from .streamed_chat_response import ( + CitationGenerationStreamedChatResponse, + SearchQueriesGenerationStreamedChatResponse, + SearchResultsStreamedChatResponse, + 
StreamEndStreamedChatResponse, + StreamStartStreamedChatResponse, StreamedChatResponse, - StreamedChatResponse_CitationGeneration, - StreamedChatResponse_SearchQueriesGeneration, - StreamedChatResponse_SearchResults, - StreamedChatResponse_StreamEnd, - StreamedChatResponse_StreamStart, - StreamedChatResponse_TextGeneration, - StreamedChatResponse_ToolCallsChunk, - StreamedChatResponse_ToolCallsGeneration, + TextGenerationStreamedChatResponse, + ToolCallsChunkStreamedChatResponse, + ToolCallsGenerationStreamedChatResponse, +) +from .streamed_chat_response_v2 import ( + CitationEndStreamedChatResponseV2, + CitationStartStreamedChatResponseV2, + ContentDeltaStreamedChatResponseV2, + ContentEndStreamedChatResponseV2, + ContentStartStreamedChatResponseV2, + MessageEndStreamedChatResponseV2, + MessageStartStreamedChatResponseV2, + StreamedChatResponseV2, + ToolCallDeltaStreamedChatResponseV2, + ToolCallEndStreamedChatResponseV2, + ToolCallStartStreamedChatResponseV2, + ToolPlanDeltaStreamedChatResponseV2, ) from .summarize_request_extractiveness import SummarizeRequestExtractiveness from .summarize_request_format import SummarizeRequestFormat from .summarize_request_length import SummarizeRequestLength from .summarize_response import SummarizeResponse -from .text_response_format import TextResponseFormat +from .system_message_content import SystemMessageContent +from .system_message_content_item import SystemMessageContentItem, TextSystemMessageContentItem +from .texts import Texts +from .texts_truncate import TextsTruncate from .tokenize_response import TokenizeResponse from .too_many_requests_error_body import TooManyRequestsErrorBody from .tool import Tool from .tool_call import ToolCall from .tool_call_delta import ToolCallDelta -from .tool_message import ToolMessage +from .tool_call_v2 import ToolCallV2 +from .tool_call_v2function import ToolCallV2Function +from .tool_content import DocumentToolContent, TextToolContent, ToolContent +from .tool_message_v2 import 
ToolMessageV2 +from .tool_message_v2tool_content import ToolMessageV2ToolContent from .tool_parameter_definitions_value import ToolParameterDefinitionsValue from .tool_result import ToolResult +from .tool_v2 import ToolV2 +from .tool_v2function import ToolV2Function from .unprocessable_entity_error_body import UnprocessableEntityErrorBody from .update_connector_response import UpdateConnectorResponse +from .usage import Usage +from .usage_billed_units import UsageBilledUnits +from .usage_tokens import UsageTokens +from .user_message_content import UserMessageContent __all__ = [ "ApiMeta", "ApiMetaApiVersion", "ApiMetaBilledUnits", "ApiMetaTokens", + "AssistantChatMessageV2", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentItem", + "AssistantMessageResponse", + "AssistantMessageResponseContentItem", "AuthTokenType", "ChatCitation", "ChatCitationGenerationEvent", "ChatConnector", + "ChatContentDeltaEvent", + "ChatContentDeltaEventDelta", + "ChatContentDeltaEventDeltaMessage", + "ChatContentDeltaEventDeltaMessageContent", + "ChatContentEndEvent", + "ChatContentStartEvent", + "ChatContentStartEventDelta", + "ChatContentStartEventDeltaMessage", + "ChatContentStartEventDeltaMessageContent", "ChatDataMetrics", "ChatDocument", + "ChatFinishReason", "ChatMessage", + "ChatMessageEndEvent", + "ChatMessageEndEventDelta", + "ChatMessageStartEvent", + "ChatMessageStartEventDelta", + "ChatMessageStartEventDeltaMessage", + "ChatMessageV2", + "ChatMessages", "ChatRequestCitationQuality", "ChatRequestConnectorsSearchOptions", "ChatRequestPromptTruncation", "ChatRequestSafetyMode", + "ChatResponse", "ChatSearchQueriesGenerationEvent", "ChatSearchQuery", "ChatSearchResult", @@ -161,15 +278,39 @@ "ChatStreamEndEvent", "ChatStreamEndEventFinishReason", "ChatStreamEvent", + "ChatStreamEventType", "ChatStreamRequestCitationQuality", "ChatStreamRequestConnectorsSearchOptions", "ChatStreamRequestPromptTruncation", "ChatStreamRequestSafetyMode", 
"ChatStreamStartEvent", "ChatTextGenerationEvent", + "ChatToolCallDeltaEvent", + "ChatToolCallDeltaEventDelta", + "ChatToolCallDeltaEventDeltaToolCall", + "ChatToolCallDeltaEventDeltaToolCallFunction", + "ChatToolCallEndEvent", + "ChatToolCallStartEvent", + "ChatToolCallStartEventDelta", + "ChatToolCallStartEventDeltaToolCall", + "ChatToolCallStartEventDeltaToolCallFunction", "ChatToolCallsChunkEvent", "ChatToolCallsGenerationEvent", + "ChatToolPlanDeltaEvent", + "ChatToolPlanDeltaEventDelta", + "ChatbotMessage", "CheckApiKeyResponse", + "Citation", + "CitationEndEvent", + "CitationEndStreamedChatResponseV2", + "CitationGenerationStreamedChatResponse", + "CitationOptions", + "CitationOptionsMode", + "CitationStartEvent", + "CitationStartEventDelta", + "CitationStartEventDeltaMessage", + "CitationStartStreamedChatResponseV2", + "ClassificationEmbedRequestV2", "ClassifyDataMetrics", "ClassifyExample", "ClassifyRequestTruncate", @@ -178,10 +319,15 @@ "ClassifyResponseClassificationsItemClassificationType", "ClassifyResponseClassificationsItemLabelsValue", "ClientClosedRequestErrorBody", + "ClusteringEmbedRequestV2", "CompatibleEndpoint", "Connector", "ConnectorAuthStatus", "ConnectorOAuth", + "Content", + "ContentDeltaStreamedChatResponseV2", + "ContentEndStreamedChatResponseV2", + "ContentStartStreamedChatResponseV2", "CreateConnectorOAuth", "CreateConnectorResponse", "CreateConnectorServiceAuth", @@ -192,6 +338,10 @@ "DatasetValidationStatus", "DeleteConnectorResponse", "DetokenizeResponse", + "Document", + "DocumentContent", + "DocumentSource", + "DocumentToolContent", "EmbedByTypeResponse", "EmbedByTypeResponseEmbeddings", "EmbedFloatsResponse", @@ -200,10 +350,11 @@ "EmbedJobStatus", "EmbedJobTruncate", "EmbedRequestTruncate", + "EmbedRequestV2", "EmbedResponse", - "EmbedResponse_EmbeddingsByType", - "EmbedResponse_EmbeddingsFloats", "EmbeddingType", + "EmbeddingsByTypeEmbedResponse", + "EmbeddingsFloatsEmbedResponse", "FinetuneDatasetMetrics", "FinishReason", 
"GatewayTimeoutErrorBody", @@ -217,22 +368,22 @@ "GenerateStreamRequestTruncate", "GenerateStreamText", "GenerateStreamedResponse", - "GenerateStreamedResponse_StreamEnd", - "GenerateStreamedResponse_StreamError", - "GenerateStreamedResponse_TextGeneration", "Generation", "GetConnectorResponse", "GetModelResponse", + "ImageEmbedRequestV2", + "Images", + "JsonObjectResponseFormat", + "JsonObjectResponseFormatV2", "JsonResponseFormat", + "JsonResponseFormatV2", "LabelMetric", "ListConnectorsResponse", "ListEmbedJobResponse", "ListModelsResponse", "Message", - "Message_Chatbot", - "Message_System", - "Message_Tool", - "Message_User", + "MessageEndStreamedChatResponseV2", + "MessageStartStreamedChatResponseV2", "Metrics", "MetricsEmbedData", "MetricsEmbedDataFieldsItem", @@ -247,33 +398,69 @@ "RerankResponseResultsItemDocument", "RerankerDataMetrics", "ResponseFormat", - "ResponseFormat_JsonObject", - "ResponseFormat_Text", + "ResponseFormatV2", + "SearchDocumentEmbedRequestV2", + "SearchQueriesGenerationStreamedChatResponse", + "SearchQueryEmbedRequestV2", + "SearchResultsStreamedChatResponse", "SingleGeneration", "SingleGenerationInStream", "SingleGenerationTokenLikelihoodsItem", + "Source", + "StreamEndGenerateStreamedResponse", + "StreamEndStreamedChatResponse", + "StreamErrorGenerateStreamedResponse", + "StreamStartStreamedChatResponse", "StreamedChatResponse", - "StreamedChatResponse_CitationGeneration", - "StreamedChatResponse_SearchQueriesGeneration", - "StreamedChatResponse_SearchResults", - "StreamedChatResponse_StreamEnd", - "StreamedChatResponse_StreamStart", - "StreamedChatResponse_TextGeneration", - "StreamedChatResponse_ToolCallsChunk", - "StreamedChatResponse_ToolCallsGeneration", + "StreamedChatResponseV2", "SummarizeRequestExtractiveness", "SummarizeRequestFormat", "SummarizeRequestLength", "SummarizeResponse", + "SystemChatMessageV2", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentItem", + "TextAssistantMessageContentItem", + 
"TextAssistantMessageResponseContentItem", + "TextContent", + "TextGenerationGenerateStreamedResponse", + "TextGenerationStreamedChatResponse", "TextResponseFormat", + "TextResponseFormatV2", + "TextSystemMessageContentItem", + "TextToolContent", + "Texts", + "TextsTruncate", "TokenizeResponse", "TooManyRequestsErrorBody", "Tool", "ToolCall", "ToolCallDelta", + "ToolCallDeltaStreamedChatResponseV2", + "ToolCallEndStreamedChatResponseV2", + "ToolCallStartStreamedChatResponseV2", + "ToolCallV2", + "ToolCallV2Function", + "ToolCallsChunkStreamedChatResponse", + "ToolCallsGenerationStreamedChatResponse", + "ToolChatMessageV2", + "ToolContent", "ToolMessage", + "ToolMessageV2", + "ToolMessageV2ToolContent", "ToolParameterDefinitionsValue", + "ToolPlanDeltaStreamedChatResponseV2", "ToolResult", + "ToolSource", + "ToolV2", + "ToolV2Function", "UnprocessableEntityErrorBody", "UpdateConnectorResponse", + "Usage", + "UsageBilledUnits", + "UsageTokens", + "UserChatMessageV2", + "UserMessage", + "UserMessageContent", ] diff --git a/src/cohere/v2/types/assistant_message.py b/src/cohere/types/assistant_message.py similarity index 78% rename from src/cohere/v2/types/assistant_message.py rename to src/cohere/types/assistant_message.py index e299a2158..6393f7d36 100644 --- a/src/cohere/v2/types/assistant_message.py +++ b/src/cohere/types/assistant_message.py @@ -1,11 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from .tool_call2 import ToolCall2 +from .tool_call_v2 import ToolCallV2 from .assistant_message_content import AssistantMessageContent from .citation import Citation -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -14,7 +14,7 @@ class AssistantMessage(UncheckedBaseModel): A message from the assistant role can contain text and tool call information. """ - tool_calls: typing.Optional[typing.List[ToolCall2]] = None + tool_calls: typing.Optional[typing.List[ToolCallV2]] = None tool_plan: typing.Optional[str] = None content: typing.Optional[AssistantMessageContent] = None citations: typing.Optional[typing.List[Citation]] = None diff --git a/src/cohere/v2/types/assistant_message_content.py b/src/cohere/types/assistant_message_content.py similarity index 100% rename from src/cohere/v2/types/assistant_message_content.py rename to src/cohere/types/assistant_message_content.py diff --git a/src/cohere/v2/types/assistant_message_content_item.py b/src/cohere/types/assistant_message_content_item.py similarity index 65% rename from src/cohere/v2/types/assistant_message_content_item.py rename to src/cohere/types/assistant_message_content_item.py index 7f2936c96..eeb64cf4a 100644 --- a/src/cohere/v2/types/assistant_message_content_item.py +++ b/src/cohere/types/assistant_message_content_item.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic import typing_extensions -from ...core.unchecked_base_model import UnionMetadata +from ..core.unchecked_base_model import UnionMetadata -class AssistantMessageContentItem_Text(UncheckedBaseModel): +class TextAssistantMessageContentItem(UncheckedBaseModel): type: typing.Literal["text"] = "text" text: str @@ -24,5 +24,5 @@ class Config: AssistantMessageContentItem = typing_extensions.Annotated[ - AssistantMessageContentItem_Text, UnionMetadata(discriminant="type") + TextAssistantMessageContentItem, UnionMetadata(discriminant="type") ] diff --git a/src/cohere/v2/types/assistant_message_response.py b/src/cohere/types/assistant_message_response.py similarity index 80% rename from src/cohere/v2/types/assistant_message_response.py rename to src/cohere/types/assistant_message_response.py index b0cf9e170..a00c827dd 100644 --- a/src/cohere/v2/types/assistant_message_response.py +++ b/src/cohere/types/assistant_message_response.py @@ -1,11 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from .tool_call2 import ToolCall2 +from .tool_call_v2 import ToolCallV2 from .assistant_message_response_content_item import AssistantMessageResponseContentItem from .citation import Citation -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -15,7 +15,7 @@ class AssistantMessageResponse(UncheckedBaseModel): """ role: typing.Literal["assistant"] = "assistant" - tool_calls: typing.Optional[typing.List[ToolCall2]] = None + tool_calls: typing.Optional[typing.List[ToolCallV2]] = None tool_plan: typing.Optional[str] = None content: typing.Optional[typing.List[AssistantMessageResponseContentItem]] = None citations: typing.Optional[typing.List[Citation]] = None diff --git a/src/cohere/v2/types/assistant_message_response_content_item.py b/src/cohere/types/assistant_message_response_content_item.py similarity index 64% rename from src/cohere/v2/types/assistant_message_response_content_item.py rename to src/cohere/types/assistant_message_response_content_item.py index 2c49973f5..6b5686530 100644 --- a/src/cohere/v2/types/assistant_message_response_content_item.py +++ b/src/cohere/types/assistant_message_response_content_item.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic import typing_extensions -from ...core.unchecked_base_model import UnionMetadata +from ..core.unchecked_base_model import UnionMetadata -class AssistantMessageResponseContentItem_Text(UncheckedBaseModel): +class TextAssistantMessageResponseContentItem(UncheckedBaseModel): type: typing.Literal["text"] = "text" text: str @@ -24,5 +24,5 @@ class Config: AssistantMessageResponseContentItem = typing_extensions.Annotated[ - AssistantMessageResponseContentItem_Text, UnionMetadata(discriminant="type") + TextAssistantMessageResponseContentItem, UnionMetadata(discriminant="type") ] diff --git a/src/cohere/v2/types/chat_content_delta_event.py b/src/cohere/types/chat_content_delta_event.py similarity index 93% rename from src/cohere/v2/types/chat_content_delta_event.py rename to src/cohere/types/chat_content_delta_event.py index b33952a06..95cea88ed 100644 --- a/src/cohere/v2/types/chat_content_delta_event.py +++ b/src/cohere/types/chat_content_delta_event.py @@ -3,7 +3,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing from .chat_content_delta_event_delta import ChatContentDeltaEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_content_delta_event_delta.py b/src/cohere/types/chat_content_delta_event_delta.py similarity index 84% rename from src/cohere/v2/types/chat_content_delta_event_delta.py rename to src/cohere/types/chat_content_delta_event_delta.py index 8c868844c..b2245aeaf 100644 --- a/src/cohere/v2/types/chat_content_delta_event_delta.py +++ b/src/cohere/types/chat_content_delta_event_delta.py @@ -1,9 +1,9 @@ # This file was 
auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_content_delta_event_delta_message import ChatContentDeltaEventDeltaMessage -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_content_delta_event_delta_message.py b/src/cohere/types/chat_content_delta_event_delta_message.py similarity index 84% rename from src/cohere/v2/types/chat_content_delta_event_delta_message.py rename to src/cohere/types/chat_content_delta_event_delta_message.py index 5bf3a1999..107837e57 100644 --- a/src/cohere/v2/types/chat_content_delta_event_delta_message.py +++ b/src/cohere/types/chat_content_delta_event_delta_message.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_content_delta_event_delta_message_content import ChatContentDeltaEventDeltaMessageContent -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_content_delta_event_delta_message_content.py b/src/cohere/types/chat_content_delta_event_delta_message_content.py similarity index 81% rename from src/cohere/v2/types/chat_content_delta_event_delta_message_content.py rename to src/cohere/types/chat_content_delta_event_delta_message_content.py index 9425f36eb..78470d40e 100644 --- a/src/cohere/v2/types/chat_content_delta_event_delta_message_content.py +++ b/src/cohere/types/chat_content_delta_event_delta_message_content.py @@ -1,8 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_content_end_event.py b/src/cohere/types/chat_content_end_event.py similarity index 92% rename from src/cohere/v2/types/chat_content_end_event.py rename to src/cohere/types/chat_content_end_event.py index f3337173d..9c7833b58 100644 --- a/src/cohere/v2/types/chat_content_end_event.py +++ b/src/cohere/types/chat_content_end_event.py @@ -2,7 +2,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_content_start_event.py b/src/cohere/types/chat_content_start_event.py similarity index 93% rename from src/cohere/v2/types/chat_content_start_event.py rename to src/cohere/types/chat_content_start_event.py index df6372716..3140073f9 100644 --- a/src/cohere/v2/types/chat_content_start_event.py +++ b/src/cohere/types/chat_content_start_event.py @@ -3,7 +3,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing from .chat_content_start_event_delta import ChatContentStartEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_content_start_event_delta.py b/src/cohere/types/chat_content_start_event_delta.py similarity index 84% rename from src/cohere/v2/types/chat_content_start_event_delta.py rename to src/cohere/types/chat_content_start_event_delta.py index 4cd5d191e..fd0172193 100644 --- a/src/cohere/v2/types/chat_content_start_event_delta.py +++ b/src/cohere/types/chat_content_start_event_delta.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API 
Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_content_start_event_delta_message import ChatContentStartEventDeltaMessage -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_content_start_event_delta_message.py b/src/cohere/types/chat_content_start_event_delta_message.py similarity index 84% rename from src/cohere/v2/types/chat_content_start_event_delta_message.py rename to src/cohere/types/chat_content_start_event_delta_message.py index 6b182da31..8b7e0d6b5 100644 --- a/src/cohere/v2/types/chat_content_start_event_delta_message.py +++ b/src/cohere/types/chat_content_start_event_delta_message.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_content_start_event_delta_message_content import ChatContentStartEventDeltaMessageContent -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_content_start_event_delta_message_content.py b/src/cohere/types/chat_content_start_event_delta_message_content.py similarity index 82% rename from src/cohere/v2/types/chat_content_start_event_delta_message_content.py rename to src/cohere/types/chat_content_start_event_delta_message_content.py index e55f8de83..945e549cf 100644 --- a/src/cohere/v2/types/chat_content_start_event_delta_message_content.py +++ b/src/cohere/types/chat_content_start_event_delta_message_content.py @@ -1,8 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_finish_reason.py b/src/cohere/types/chat_finish_reason.py similarity index 100% rename from src/cohere/v2/types/chat_finish_reason.py rename to src/cohere/types/chat_finish_reason.py diff --git a/src/cohere/v2/types/chat_message_end_event.py b/src/cohere/types/chat_message_end_event.py similarity index 93% rename from src/cohere/v2/types/chat_message_end_event.py rename to src/cohere/types/chat_message_end_event.py index f0d038163..3f518a77c 100644 --- a/src/cohere/v2/types/chat_message_end_event.py +++ b/src/cohere/types/chat_message_end_event.py @@ -3,7 +3,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing from .chat_message_end_event_delta import ChatMessageEndEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_message_end_event_delta.py b/src/cohere/types/chat_message_end_event_delta.py similarity index 84% rename from src/cohere/v2/types/chat_message_end_event_delta.py rename to src/cohere/types/chat_message_end_event_delta.py index ee91b93cb..2b5808254 100644 --- a/src/cohere/v2/types/chat_message_end_event_delta.py +++ b/src/cohere/types/chat_message_end_event_delta.py @@ -1,10 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_finish_reason import ChatFinishReason from .usage import Usage -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_message_start_event.py b/src/cohere/types/chat_message_start_event.py similarity index 93% rename from src/cohere/v2/types/chat_message_start_event.py rename to src/cohere/types/chat_message_start_event.py index fdfc2a8d2..c345d1bcd 100644 --- a/src/cohere/v2/types/chat_message_start_event.py +++ b/src/cohere/types/chat_message_start_event.py @@ -4,7 +4,7 @@ import typing import pydantic from .chat_message_start_event_delta import ChatMessageStartEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ChatMessageStartEvent(ChatStreamEventType): diff --git a/src/cohere/v2/types/chat_message_start_event_delta.py b/src/cohere/types/chat_message_start_event_delta.py similarity index 84% rename from src/cohere/v2/types/chat_message_start_event_delta.py rename to src/cohere/types/chat_message_start_event_delta.py index 1a34986ed..e8a98939d 100644 --- a/src/cohere/v2/types/chat_message_start_event_delta.py +++ b/src/cohere/types/chat_message_start_event_delta.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_message_start_event_delta_message import ChatMessageStartEventDeltaMessage -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_message_start_event_delta_message.py b/src/cohere/types/chat_message_start_event_delta_message.py similarity index 83% rename from src/cohere/v2/types/chat_message_start_event_delta_message.py rename to src/cohere/types/chat_message_start_event_delta_message.py index 24cedb102..b384fd59d 100644 --- a/src/cohere/v2/types/chat_message_start_event_delta_message.py +++ b/src/cohere/types/chat_message_start_event_delta_message.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ChatMessageStartEventDeltaMessage(UncheckedBaseModel): diff --git a/src/cohere/v2/types/chat_message2.py b/src/cohere/types/chat_message_v2.py similarity index 76% rename from src/cohere/v2/types/chat_message2.py rename to src/cohere/types/chat_message_v2.py index 0abc57ff4..b26258010 100644 --- a/src/cohere/v2/types/chat_message2.py +++ b/src/cohere/types/chat_message_v2.py @@ -1,28 +1,27 @@ # This file was auto-generated by Fern from our API Definition. 
from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .user_message_content import UserMessageContent -from ...types.chat_document import ChatDocument -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from .tool_call2 import ToolCall2 +from .tool_call_v2 import ToolCallV2 from .assistant_message_content import AssistantMessageContent from .citation import Citation from .system_message_content import SystemMessageContent +from .tool_message_v2tool_content import ToolMessageV2ToolContent import typing_extensions -from ...core.unchecked_base_model import UnionMetadata +from ..core.unchecked_base_model import UnionMetadata -class ChatMessage2_User(UncheckedBaseModel): +class UserChatMessageV2(UncheckedBaseModel): """ Represents a single message in the chat history from a given role. """ role: typing.Literal["user"] = "user" content: UserMessageContent - documents: typing.Optional[typing.List[ChatDocument]] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 @@ -34,13 +33,13 @@ class Config: extra = pydantic.Extra.allow -class ChatMessage2_Assistant(UncheckedBaseModel): +class AssistantChatMessageV2(UncheckedBaseModel): """ Represents a single message in the chat history from a given role. 
""" role: typing.Literal["assistant"] = "assistant" - tool_calls: typing.Optional[typing.List[ToolCall2]] = None + tool_calls: typing.Optional[typing.List[ToolCallV2]] = None tool_plan: typing.Optional[str] = None content: typing.Optional[AssistantMessageContent] = None citations: typing.Optional[typing.List[Citation]] = None @@ -55,7 +54,7 @@ class Config: extra = pydantic.Extra.allow -class ChatMessage2_System(UncheckedBaseModel): +class SystemChatMessageV2(UncheckedBaseModel): """ Represents a single message in the chat history from a given role. """ @@ -73,14 +72,14 @@ class Config: extra = pydantic.Extra.allow -class ChatMessage2_Tool(UncheckedBaseModel): +class ToolChatMessageV2(UncheckedBaseModel): """ Represents a single message in the chat history from a given role. """ role: typing.Literal["tool"] = "tool" tool_call_id: str - tool_content: typing.List[str] + tool_content: ToolMessageV2ToolContent if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 @@ -92,7 +91,7 @@ class Config: extra = pydantic.Extra.allow -ChatMessage2 = typing_extensions.Annotated[ - typing.Union[ChatMessage2_User, ChatMessage2_Assistant, ChatMessage2_System, ChatMessage2_Tool], +ChatMessageV2 = typing_extensions.Annotated[ + typing.Union[UserChatMessageV2, AssistantChatMessageV2, SystemChatMessageV2, ToolChatMessageV2], UnionMetadata(discriminant="role"), ] diff --git a/src/cohere/types/chat_messages.py b/src/cohere/types/chat_messages.py new file mode 100644 index 000000000..7c2af2180 --- /dev/null +++ b/src/cohere/types/chat_messages.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from .chat_message_v2 import ChatMessageV2 + +ChatMessages = typing.List[ChatMessageV2] diff --git a/src/cohere/v2/types/non_streamed_chat_response2.py b/src/cohere/types/chat_response.py similarity index 85% rename from src/cohere/v2/types/non_streamed_chat_response2.py rename to src/cohere/types/chat_response.py index a0f2a8db6..738f56540 100644 --- a/src/cohere/v2/types/non_streamed_chat_response2.py +++ b/src/cohere/types/chat_response.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import pydantic from .chat_finish_reason import ChatFinishReason import typing from .assistant_message_response import AssistantMessageResponse from .usage import Usage -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 -class NonStreamedChatResponse2(UncheckedBaseModel): +class ChatResponse(UncheckedBaseModel): id: str = pydantic.Field() """ Unique identifier for the generated reply. Useful for submitting feedback. diff --git a/src/cohere/v2/types/chat_stream_event_type.py b/src/cohere/types/chat_stream_event_type.py similarity index 80% rename from src/cohere/v2/types/chat_stream_event_type.py rename to src/cohere/types/chat_stream_event_type.py index 0bd3c55ba..70d25a015 100644 --- a/src/cohere/v2/types/chat_stream_event_type.py +++ b/src/cohere/types/chat_stream_event_type.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_delta_event.py b/src/cohere/types/chat_tool_call_delta_event.py similarity index 93% rename from src/cohere/v2/types/chat_tool_call_delta_event.py rename to src/cohere/types/chat_tool_call_delta_event.py index 84afb7677..a287d9312 100644 --- a/src/cohere/v2/types/chat_tool_call_delta_event.py +++ b/src/cohere/types/chat_tool_call_delta_event.py @@ -3,7 +3,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing from .chat_tool_call_delta_event_delta import ChatToolCallDeltaEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_delta_event_delta.py b/src/cohere/types/chat_tool_call_delta_event_delta.py similarity index 84% rename from src/cohere/v2/types/chat_tool_call_delta_event_delta.py rename to src/cohere/types/chat_tool_call_delta_event_delta.py index 47df23b3c..815773dbb 100644 --- a/src/cohere/v2/types/chat_tool_call_delta_event_delta.py +++ b/src/cohere/types/chat_tool_call_delta_event_delta.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_tool_call_delta_event_delta_tool_call import ChatToolCallDeltaEventDeltaToolCall -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_delta_event_delta_tool_call.py b/src/cohere/types/chat_tool_call_delta_event_delta_tool_call.py similarity index 84% rename from src/cohere/v2/types/chat_tool_call_delta_event_delta_tool_call.py rename to src/cohere/types/chat_tool_call_delta_event_delta_tool_call.py index f53f2a5cb..2d0f8f877 100644 --- a/src/cohere/v2/types/chat_tool_call_delta_event_delta_tool_call.py +++ b/src/cohere/types/chat_tool_call_delta_event_delta_tool_call.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_tool_call_delta_event_delta_tool_call_function import ChatToolCallDeltaEventDeltaToolCallFunction -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_delta_event_delta_tool_call_function.py b/src/cohere/types/chat_tool_call_delta_event_delta_tool_call_function.py similarity index 81% rename from src/cohere/v2/types/chat_tool_call_delta_event_delta_tool_call_function.py rename to src/cohere/types/chat_tool_call_delta_event_delta_tool_call_function.py index 2ec3c2d69..27dd7d575 100644 --- a/src/cohere/v2/types/chat_tool_call_delta_event_delta_tool_call_function.py +++ b/src/cohere/types/chat_tool_call_delta_event_delta_tool_call_function.py @@ -1,8 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_end_event.py b/src/cohere/types/chat_tool_call_end_event.py similarity index 92% rename from src/cohere/v2/types/chat_tool_call_end_event.py rename to src/cohere/types/chat_tool_call_end_event.py index dfa3bef27..788ef04f7 100644 --- a/src/cohere/v2/types/chat_tool_call_end_event.py +++ b/src/cohere/types/chat_tool_call_end_event.py @@ -2,7 +2,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_start_event.py b/src/cohere/types/chat_tool_call_start_event.py similarity index 93% rename from src/cohere/v2/types/chat_tool_call_start_event.py rename to src/cohere/types/chat_tool_call_start_event.py index e241b3c33..2ad477ef7 100644 --- a/src/cohere/v2/types/chat_tool_call_start_event.py +++ b/src/cohere/types/chat_tool_call_start_event.py @@ -3,7 +3,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing from .chat_tool_call_start_event_delta import ChatToolCallStartEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_start_event_delta.py b/src/cohere/types/chat_tool_call_start_event_delta.py similarity index 84% rename from src/cohere/v2/types/chat_tool_call_start_event_delta.py rename to src/cohere/types/chat_tool_call_start_event_delta.py index bce86be8a..101cba4ad 100644 --- a/src/cohere/v2/types/chat_tool_call_start_event_delta.py +++ b/src/cohere/types/chat_tool_call_start_event_delta.py @@ -1,9 +1,9 @@ # This file was 
auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_tool_call_start_event_delta_tool_call import ChatToolCallStartEventDeltaToolCall -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_start_event_delta_tool_call.py b/src/cohere/types/chat_tool_call_start_event_delta_tool_call.py similarity index 86% rename from src/cohere/v2/types/chat_tool_call_start_event_delta_tool_call.py rename to src/cohere/types/chat_tool_call_start_event_delta_tool_call.py index 3d100b023..e8724bc3c 100644 --- a/src/cohere/v2/types/chat_tool_call_start_event_delta_tool_call.py +++ b/src/cohere/types/chat_tool_call_start_event_delta_tool_call.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_tool_call_start_event_delta_tool_call_function import ChatToolCallStartEventDeltaToolCallFunction -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_call_start_event_delta_tool_call_function.py b/src/cohere/types/chat_tool_call_start_event_delta_tool_call_function.py similarity index 82% rename from src/cohere/v2/types/chat_tool_call_start_event_delta_tool_call_function.py rename to src/cohere/types/chat_tool_call_start_event_delta_tool_call_function.py index c4d88cf75..f7dcfcc8b 100644 --- a/src/cohere/v2/types/chat_tool_call_start_event_delta_tool_call_function.py +++ b/src/cohere/types/chat_tool_call_start_event_delta_tool_call_function.py @@ -1,8 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_plan_delta_event.py b/src/cohere/types/chat_tool_plan_delta_event.py similarity index 92% rename from src/cohere/v2/types/chat_tool_plan_delta_event.py rename to src/cohere/types/chat_tool_plan_delta_event.py index dee9a0ec8..f2286a99c 100644 --- a/src/cohere/v2/types/chat_tool_plan_delta_event.py +++ b/src/cohere/types/chat_tool_plan_delta_event.py @@ -3,7 +3,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing from .chat_tool_plan_delta_event_delta import ChatToolPlanDeltaEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/chat_tool_plan_delta_event_delta.py b/src/cohere/types/chat_tool_plan_delta_event_delta.py similarity index 81% rename from src/cohere/v2/types/chat_tool_plan_delta_event_delta.py rename to src/cohere/types/chat_tool_plan_delta_event_delta.py index 1aa872220..6dacb1dce 100644 --- a/src/cohere/v2/types/chat_tool_plan_delta_event_delta.py +++ b/src/cohere/types/chat_tool_plan_delta_event_delta.py @@ -1,8 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/citation.py b/src/cohere/types/citation.py similarity index 86% rename from src/cohere/v2/types/citation.py rename to src/cohere/types/citation.py index 140f28677..23dc6740e 100644 --- a/src/cohere/v2/types/citation.py +++ b/src/cohere/types/citation.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .source import Source -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/citation_end_event.py b/src/cohere/types/citation_end_event.py similarity index 91% rename from src/cohere/v2/types/citation_end_event.py rename to src/cohere/types/citation_end_event.py index 9c9fd5c47..b0a3853ce 100644 --- a/src/cohere/v2/types/citation_end_event.py +++ b/src/cohere/types/citation_end_event.py @@ -2,7 +2,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/types/citation_options.py b/src/cohere/types/citation_options.py new file mode 100644 index 000000000..d03992be8 --- /dev/null +++ b/src/cohere/types/citation_options.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .citation_options_mode import CitationOptionsMode +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class CitationOptions(UncheckedBaseModel): + """ + Options for controlling citation generation. + """ + + mode: typing.Optional[CitationOptionsMode] = pydantic.Field(default=None) + """ + Defaults to `"accurate"`. + Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/types/citation_options_mode.py b/src/cohere/types/citation_options_mode.py new file mode 100644 index 000000000..ddfdf67f2 --- /dev/null +++ b/src/cohere/types/citation_options_mode.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CitationOptionsMode = typing.Union[typing.Literal["FAST", "ACCURATE", "OFF"], typing.Any] diff --git a/src/cohere/v2/types/citation_start_event.py b/src/cohere/types/citation_start_event.py similarity index 93% rename from src/cohere/v2/types/citation_start_event.py rename to src/cohere/types/citation_start_event.py index 2052f5ebf..355281d9a 100644 --- a/src/cohere/v2/types/citation_start_event.py +++ b/src/cohere/types/citation_start_event.py @@ -3,7 +3,7 @@ from .chat_stream_event_type import ChatStreamEventType import typing from .citation_start_event_delta import CitationStartEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/citation_start_event_delta.py b/src/cohere/types/citation_start_event_delta.py similarity index 83% rename from src/cohere/v2/types/citation_start_event_delta.py rename to src/cohere/types/citation_start_event_delta.py index f59e78003..f114b5159 100644 --- a/src/cohere/v2/types/citation_start_event_delta.py +++ b/src/cohere/types/citation_start_event_delta.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .citation_start_event_delta_message import CitationStartEventDeltaMessage -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/citation_start_event_delta_message.py b/src/cohere/types/citation_start_event_delta_message.py similarity index 82% rename from src/cohere/v2/types/citation_start_event_delta_message.py rename to src/cohere/types/citation_start_event_delta_message.py index b0741909c..cfddfdda1 100644 --- a/src/cohere/v2/types/citation_start_event_delta_message.py +++ b/src/cohere/types/citation_start_event_delta_message.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .citation import Citation -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/content.py b/src/cohere/types/content.py similarity index 67% rename from src/cohere/v2/types/content.py rename to src/cohere/types/content.py index 597aa8348..3b2ded0f4 100644 --- a/src/cohere/v2/types/content.py +++ b/src/cohere/types/content.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic import typing_extensions -from ...core.unchecked_base_model import UnionMetadata +from ..core.unchecked_base_model import UnionMetadata -class Content_Text(UncheckedBaseModel): +class TextContent(UncheckedBaseModel): """ A Content block which contains information about the content type and the content itself. """ @@ -27,4 +27,4 @@ class Config: extra = pydantic.Extra.allow -Content = typing_extensions.Annotated[Content_Text, UnionMetadata(discriminant="type")] +Content = typing_extensions.Annotated[TextContent, UnionMetadata(discriminant="type")] diff --git a/src/cohere/types/document.py b/src/cohere/types/document.py new file mode 100644 index 000000000..a089622ea --- /dev/null +++ b/src/cohere/types/document.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class Document(UncheckedBaseModel): + """ + Relevant information that could be used by the model to generate a more accurate reply. + The content of each document is generally short (should be under 300 words). Metadata should be used to provide additional information; both the key name and the value will be + passed to the model. + """ + + data: typing.Dict[str, str] = pydantic.Field() + """ + A relevant document that the model can cite to generate a more accurate reply. Each document is a string-string dictionary. + """ + + id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for this document which will be referenced in citations.
If not provided, an ID will be automatically generated + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/types/document_content.py b/src/cohere/types/document_content.py new file mode 100644 index 000000000..a3194d504 --- /dev/null +++ b/src/cohere/types/document_content.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .document import Document +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class DocumentContent(UncheckedBaseModel): + """ + Document content. + """ + + document: Document + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/document_source.py b/src/cohere/types/document_source.py similarity index 86% rename from src/cohere/v2/types/document_source.py rename to src/cohere/types/document_source.py index a04be859e..2402ba055 100644 --- a/src/cohere/v2/types/document_source.py +++ b/src/cohere/types/document_source.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition.
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class DocumentSource(UncheckedBaseModel): diff --git a/src/cohere/types/embed_request_v2.py b/src/cohere/types/embed_request_v2.py new file mode 100644 index 000000000..d5c36dbb5 --- /dev/null +++ b/src/cohere/types/embed_request_v2.py @@ -0,0 +1,107 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .embedding_type import EmbeddingType +from .texts_truncate import TextsTruncate +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +import typing_extensions +from ..core.unchecked_base_model import UnionMetadata + + +class SearchDocumentEmbedRequestV2(UncheckedBaseModel): + input_type: typing.Literal["search_document"] = "search_document" + texts: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + truncate: typing.Optional[TextsTruncate] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class SearchQueryEmbedRequestV2(UncheckedBaseModel): + input_type: typing.Literal["search_query"] = "search_query" + texts: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + truncate: typing.Optional[TextsTruncate] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + 
+class ClassificationEmbedRequestV2(UncheckedBaseModel): + input_type: typing.Literal["classification"] = "classification" + texts: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + truncate: typing.Optional[TextsTruncate] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ClusteringEmbedRequestV2(UncheckedBaseModel): + input_type: typing.Literal["clustering"] = "clustering" + texts: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + truncate: typing.Optional[TextsTruncate] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ImageEmbedRequestV2(UncheckedBaseModel): + input_type: typing.Literal["image"] = "image" + images: typing.List[str] + model: str + embedding_types: typing.Optional[typing.List[EmbeddingType]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +EmbedRequestV2 = typing_extensions.Annotated[ + typing.Union[ + SearchDocumentEmbedRequestV2, + SearchQueryEmbedRequestV2, + ClassificationEmbedRequestV2, + ClusteringEmbedRequestV2, + ImageEmbedRequestV2, + ], + UnionMetadata(discriminant="input_type"), +] diff --git a/src/cohere/types/embed_response.py b/src/cohere/types/embed_response.py index d1d6083f1..d6e142daf 100644 --- a/src/cohere/types/embed_response.py +++ b/src/cohere/types/embed_response.py @@ -11,7 +11,7 @@ from 
..core.unchecked_base_model import UnionMetadata -class EmbedResponse_EmbeddingsFloats(UncheckedBaseModel): +class EmbeddingsFloatsEmbedResponse(UncheckedBaseModel): response_type: typing.Literal["embeddings_floats"] = "embeddings_floats" id: str embeddings: typing.List[typing.List[float]] @@ -28,7 +28,7 @@ class Config: extra = pydantic.Extra.allow -class EmbedResponse_EmbeddingsByType(UncheckedBaseModel): +class EmbeddingsByTypeEmbedResponse(UncheckedBaseModel): response_type: typing.Literal["embeddings_by_type"] = "embeddings_by_type" id: str embeddings: EmbedByTypeResponseEmbeddings @@ -46,6 +46,6 @@ class Config: EmbedResponse = typing_extensions.Annotated[ - typing.Union[EmbedResponse_EmbeddingsFloats, EmbedResponse_EmbeddingsByType], + typing.Union[EmbeddingsFloatsEmbedResponse, EmbeddingsByTypeEmbedResponse], UnionMetadata(discriminant="response_type"), ] diff --git a/src/cohere/types/generate_streamed_response.py b/src/cohere/types/generate_streamed_response.py index d6b74f4bc..a7694cbbe 100644 --- a/src/cohere/types/generate_streamed_response.py +++ b/src/cohere/types/generate_streamed_response.py @@ -11,7 +11,7 @@ from ..core.unchecked_base_model import UnionMetadata -class GenerateStreamedResponse_TextGeneration(UncheckedBaseModel): +class TextGenerationGenerateStreamedResponse(UncheckedBaseModel): """ Response in content type stream when `stream` is `true` in the request parameters. Generation tokens are streamed with the GenerationStream response. The final response is of type GenerationFinalResponse. """ @@ -31,7 +31,7 @@ class Config: extra = pydantic.Extra.allow -class GenerateStreamedResponse_StreamEnd(UncheckedBaseModel): +class StreamEndGenerateStreamedResponse(UncheckedBaseModel): """ Response in content type stream when `stream` is `true` in the request parameters. Generation tokens are streamed with the GenerationStream response. The final response is of type GenerationFinalResponse. 
""" @@ -51,7 +51,7 @@ class Config: extra = pydantic.Extra.allow -class GenerateStreamedResponse_StreamError(UncheckedBaseModel): +class StreamErrorGenerateStreamedResponse(UncheckedBaseModel): """ Response in content type stream when `stream` is `true` in the request parameters. Generation tokens are streamed with the GenerationStream response. The final response is of type GenerationFinalResponse. """ @@ -74,9 +74,7 @@ class Config: GenerateStreamedResponse = typing_extensions.Annotated[ typing.Union[ - GenerateStreamedResponse_TextGeneration, - GenerateStreamedResponse_StreamEnd, - GenerateStreamedResponse_StreamError, + TextGenerationGenerateStreamedResponse, StreamEndGenerateStreamedResponse, StreamErrorGenerateStreamedResponse ], UnionMetadata(discriminant="event_type"), ] diff --git a/src/cohere/types/images.py b/src/cohere/types/images.py new file mode 100644 index 000000000..5c829189a --- /dev/null +++ b/src/cohere/types/images.py @@ -0,0 +1,50 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from .embedding_type import EmbeddingType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class Images(UncheckedBaseModel): + images: typing.List[str] = pydantic.Field() + """ + An array of image data URIs for the model to embed. Maximum number of images per call is `1`. + The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and has a maximum size of 5MB. + """ + + model: str = pydantic.Field() + """ + Defaults to embed-english-v2.0 + The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. 
+ Available models and corresponding embedding dimensions: + + - `embed-english-v3.0` 1024 + - `embed-multilingual-v3.0` 1024 + - `embed-english-light-v3.0` 384 + - `embed-multilingual-light-v3.0` 384 + - `embed-english-v2.0` 4096 + - `embed-english-light-v2.0` 1024 + - `embed-multilingual-v2.0` 768 + """ + + embedding_types: typing.Optional[typing.List[EmbeddingType]] = pydantic.Field(default=None) + """ + Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. + + - `"float"`: Use this when you want to get back the default float embeddings. Valid for all models. + - `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models. + - `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models. + - `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models. + - `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/json_response_format2.py b/src/cohere/types/json_response_format_v2.py similarity index 86% rename from src/cohere/v2/types/json_response_format2.py rename to src/cohere/types/json_response_format_v2.py index c0a45d1ee..3819d8643 100644 --- a/src/cohere/v2/types/json_response_format2.py +++ b/src/cohere/types/json_response_format_v2.py @@ -1,12 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 -class JsonResponseFormat2(UncheckedBaseModel): +class JsonResponseFormatV2(UncheckedBaseModel): json_schema: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) """ [BETA] A JSON schema object that the output will adhere to. There are some restrictions we have on the schema, refer to [our guide](/docs/structured-outputs-json#schema-constraints) for more information. diff --git a/src/cohere/types/message.py b/src/cohere/types/message.py index d4edc57e8..cdb690afb 100644 --- a/src/cohere/types/message.py +++ b/src/cohere/types/message.py @@ -11,7 +11,7 @@ from ..core.unchecked_base_model import UnionMetadata -class Message_Chatbot(UncheckedBaseModel): +class ChatbotMessage(UncheckedBaseModel): role: typing.Literal["CHATBOT"] = "CHATBOT" message: str tool_calls: typing.Optional[typing.List[ToolCall]] = None @@ -26,7 +26,7 @@ class Config: extra = pydantic.Extra.allow -class Message_System(UncheckedBaseModel): +class SystemMessage(UncheckedBaseModel): role: typing.Literal["SYSTEM"] = "SYSTEM" message: str tool_calls: typing.Optional[typing.List[ToolCall]] = None @@ -41,7 +41,7 @@ class Config: extra = pydantic.Extra.allow -class Message_User(UncheckedBaseModel): +class UserMessage(UncheckedBaseModel): role: typing.Literal["USER"] = "USER" message: str tool_calls: typing.Optional[typing.List[ToolCall]] = None @@ -56,7 +56,7 @@ class Config: extra = pydantic.Extra.allow -class Message_Tool(UncheckedBaseModel): +class ToolMessage(UncheckedBaseModel): role: typing.Literal["TOOL"] = "TOOL" tool_results: typing.Optional[typing.List[ToolResult]] = None @@ -71,5 +71,5 @@ class Config: Message = typing_extensions.Annotated[ - typing.Union[Message_Chatbot, Message_System, 
Message_User, Message_Tool], UnionMetadata(discriminant="role") + typing.Union[ChatbotMessage, SystemMessage, UserMessage, ToolMessage], UnionMetadata(discriminant="role") ] diff --git a/src/cohere/types/response_format.py b/src/cohere/types/response_format.py index 458244ff5..dae43927f 100644 --- a/src/cohere/types/response_format.py +++ b/src/cohere/types/response_format.py @@ -10,7 +10,7 @@ from ..core.unchecked_base_model import UnionMetadata -class ResponseFormat_Text(UncheckedBaseModel): +class TextResponseFormat(UncheckedBaseModel): """ Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models. @@ -34,7 +34,7 @@ class Config: extra = pydantic.Extra.allow -class ResponseFormat_JsonObject(UncheckedBaseModel): +class JsonObjectResponseFormat(UncheckedBaseModel): """ Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models. @@ -62,5 +62,5 @@ class Config: ResponseFormat = typing_extensions.Annotated[ - typing.Union[ResponseFormat_Text, ResponseFormat_JsonObject], UnionMetadata(discriminant="type") + typing.Union[TextResponseFormat, JsonObjectResponseFormat], UnionMetadata(discriminant="type") ] diff --git a/src/cohere/v2/types/response_format2.py b/src/cohere/types/response_format_v2.py similarity index 86% rename from src/cohere/v2/types/response_format2.py rename to src/cohere/types/response_format_v2.py index 6d3316b3c..3d3b21600 100644 --- a/src/cohere/v2/types/response_format2.py +++ b/src/cohere/types/response_format_v2.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic import typing_extensions -from ...core.unchecked_base_model import UnionMetadata +from ..core.unchecked_base_model import UnionMetadata -class ResponseFormat2_Text(UncheckedBaseModel): +class TextResponseFormatV2(UncheckedBaseModel): """ Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/docs/command-r), [Command R+](https://docs.cohere.com/docs/command-r-plus) and newer models. @@ -33,7 +33,7 @@ class Config: extra = pydantic.Extra.allow -class ResponseFormat2_JsonObject(UncheckedBaseModel): +class JsonObjectResponseFormatV2(UncheckedBaseModel): """ Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/docs/command-r), [Command R+](https://docs.cohere.com/docs/command-r-plus) and newer models. @@ -58,6 +58,6 @@ class Config: extra = pydantic.Extra.allow -ResponseFormat2 = typing_extensions.Annotated[ - typing.Union[ResponseFormat2_Text, ResponseFormat2_JsonObject], UnionMetadata(discriminant="type") +ResponseFormatV2 = typing_extensions.Annotated[ + typing.Union[TextResponseFormatV2, JsonObjectResponseFormatV2], UnionMetadata(discriminant="type") ] diff --git a/src/cohere/v2/types/source.py b/src/cohere/types/source.py similarity index 77% rename from src/cohere/v2/types/source.py rename to src/cohere/types/source.py index a96fc9e6c..0087cf628 100644 --- a/src/cohere/v2/types/source.py +++ b/src/cohere/types/source.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic import typing_extensions -from ...core.unchecked_base_model import UnionMetadata +from ..core.unchecked_base_model import UnionMetadata -class Source_Tool(UncheckedBaseModel): +class ToolSource(UncheckedBaseModel): """ A source object containing information about the source of the data cited. """ @@ -28,7 +28,7 @@ class Config: extra = pydantic.Extra.allow -class Source_Document(UncheckedBaseModel): +class DocumentSource(UncheckedBaseModel): """ A source object containing information about the source of the data cited. """ @@ -47,4 +47,4 @@ class Config: extra = pydantic.Extra.allow -Source = typing_extensions.Annotated[typing.Union[Source_Tool, Source_Document], UnionMetadata(discriminant="type")] +Source = typing_extensions.Annotated[typing.Union[ToolSource, DocumentSource], UnionMetadata(discriminant="type")] diff --git a/src/cohere/types/streamed_chat_response.py b/src/cohere/types/streamed_chat_response.py index 0576316a8..32e525970 100644 --- a/src/cohere/types/streamed_chat_response.py +++ b/src/cohere/types/streamed_chat_response.py @@ -17,7 +17,7 @@ from ..core.unchecked_base_model import UnionMetadata -class StreamedChatResponse_StreamStart(UncheckedBaseModel): +class StreamStartStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -35,7 +35,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_SearchQueriesGeneration(UncheckedBaseModel): +class SearchQueriesGenerationStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). 
""" @@ -53,7 +53,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_SearchResults(UncheckedBaseModel): +class SearchResultsStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -72,7 +72,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_TextGeneration(UncheckedBaseModel): +class TextGenerationStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -90,7 +90,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_CitationGeneration(UncheckedBaseModel): +class CitationGenerationStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -108,7 +108,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_ToolCallsGeneration(UncheckedBaseModel): +class ToolCallsGenerationStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -127,7 +127,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_StreamEnd(UncheckedBaseModel): +class StreamEndStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -146,7 +146,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse_ToolCallsChunk(UncheckedBaseModel): +class ToolCallsChunkStreamedChatResponse(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). 
""" @@ -166,14 +166,14 @@ class Config: StreamedChatResponse = typing_extensions.Annotated[ typing.Union[ - StreamedChatResponse_StreamStart, - StreamedChatResponse_SearchQueriesGeneration, - StreamedChatResponse_SearchResults, - StreamedChatResponse_TextGeneration, - StreamedChatResponse_CitationGeneration, - StreamedChatResponse_ToolCallsGeneration, - StreamedChatResponse_StreamEnd, - StreamedChatResponse_ToolCallsChunk, + StreamStartStreamedChatResponse, + SearchQueriesGenerationStreamedChatResponse, + SearchResultsStreamedChatResponse, + TextGenerationStreamedChatResponse, + CitationGenerationStreamedChatResponse, + ToolCallsGenerationStreamedChatResponse, + StreamEndStreamedChatResponse, + ToolCallsChunkStreamedChatResponse, ], UnionMetadata(discriminant="event_type"), ] diff --git a/src/cohere/v2/types/streamed_chat_response2.py b/src/cohere/types/streamed_chat_response_v2.py similarity index 83% rename from src/cohere/v2/types/streamed_chat_response2.py rename to src/cohere/types/streamed_chat_response_v2.py index ab8c49249..50282f760 100644 --- a/src/cohere/v2/types/streamed_chat_response2.py +++ b/src/cohere/types/streamed_chat_response_v2.py @@ -1,10 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chat_message_start_event_delta import ChatMessageStartEventDelta -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic from .chat_content_start_event_delta import ChatContentStartEventDelta from .chat_content_delta_event_delta import ChatContentDeltaEventDelta @@ -14,10 +14,10 @@ from .citation_start_event_delta import CitationStartEventDelta from .chat_message_end_event_delta import ChatMessageEndEventDelta import typing_extensions -from ...core.unchecked_base_model import UnionMetadata +from ..core.unchecked_base_model import UnionMetadata -class StreamedChatResponse2_MessageStart(UncheckedBaseModel): +class MessageStartStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -36,7 +36,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ContentStart(UncheckedBaseModel): +class ContentStartStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -55,7 +55,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ContentDelta(UncheckedBaseModel): +class ContentDeltaStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -74,7 +74,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ContentEnd(UncheckedBaseModel): +class ContentEndStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). 
""" @@ -92,7 +92,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ToolPlanDelta(UncheckedBaseModel): +class ToolPlanDeltaStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -110,7 +110,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ToolCallStart(UncheckedBaseModel): +class ToolCallStartStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -129,7 +129,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ToolCallDelta(UncheckedBaseModel): +class ToolCallDeltaStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -148,7 +148,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_ToolCallEnd(UncheckedBaseModel): +class ToolCallEndStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -166,7 +166,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_CitationStart(UncheckedBaseModel): +class CitationStartStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -185,7 +185,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_CitationEnd(UncheckedBaseModel): +class CitationEndStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). 
""" @@ -203,7 +203,7 @@ class Config: extra = pydantic.Extra.allow -class StreamedChatResponse2_MessageEnd(UncheckedBaseModel): +class MessageEndStreamedChatResponseV2(UncheckedBaseModel): """ StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request). """ @@ -222,19 +222,19 @@ class Config: extra = pydantic.Extra.allow -StreamedChatResponse2 = typing_extensions.Annotated[ +StreamedChatResponseV2 = typing_extensions.Annotated[ typing.Union[ - StreamedChatResponse2_MessageStart, - StreamedChatResponse2_ContentStart, - StreamedChatResponse2_ContentDelta, - StreamedChatResponse2_ContentEnd, - StreamedChatResponse2_ToolPlanDelta, - StreamedChatResponse2_ToolCallStart, - StreamedChatResponse2_ToolCallDelta, - StreamedChatResponse2_ToolCallEnd, - StreamedChatResponse2_CitationStart, - StreamedChatResponse2_CitationEnd, - StreamedChatResponse2_MessageEnd, + MessageStartStreamedChatResponseV2, + ContentStartStreamedChatResponseV2, + ContentDeltaStreamedChatResponseV2, + ContentEndStreamedChatResponseV2, + ToolPlanDeltaStreamedChatResponseV2, + ToolCallStartStreamedChatResponseV2, + ToolCallDeltaStreamedChatResponseV2, + ToolCallEndStreamedChatResponseV2, + CitationStartStreamedChatResponseV2, + CitationEndStreamedChatResponseV2, + MessageEndStreamedChatResponseV2, ], UnionMetadata(discriminant="type"), ] diff --git a/src/cohere/v2/types/system_message.py b/src/cohere/types/system_message.py similarity index 83% rename from src/cohere/v2/types/system_message.py rename to src/cohere/types/system_message.py index 23fce20ee..cdc8460a2 100644 --- a/src/cohere/v2/types/system_message.py +++ b/src/cohere/types/system_message.py @@ -1,8 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel from .system_message_content import SystemMessageContent -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing import pydantic diff --git a/src/cohere/v2/types/system_message_content.py b/src/cohere/types/system_message_content.py similarity index 100% rename from src/cohere/v2/types/system_message_content.py rename to src/cohere/types/system_message_content.py diff --git a/src/cohere/v2/types/system_message_content_item.py b/src/cohere/types/system_message_content_item.py similarity index 58% rename from src/cohere/v2/types/system_message_content_item.py rename to src/cohere/types/system_message_content_item.py index ed95a8a90..6865810cd 100644 --- a/src/cohere/v2/types/system_message_content_item.py +++ b/src/cohere/types/system_message_content_item.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic import typing_extensions -from ...core.unchecked_base_model import UnionMetadata +from ..core.unchecked_base_model import UnionMetadata -class SystemMessageContentItem_Text(UncheckedBaseModel): +class TextSystemMessageContentItem(UncheckedBaseModel): type: typing.Literal["text"] = "text" text: str @@ -23,6 +23,4 @@ class Config: extra = pydantic.Extra.allow -SystemMessageContentItem = typing_extensions.Annotated[ - SystemMessageContentItem_Text, UnionMetadata(discriminant="type") -] +SystemMessageContentItem = typing_extensions.Annotated[TextSystemMessageContentItem, UnionMetadata(discriminant="type")] diff --git a/src/cohere/v2/types/text_content.py b/src/cohere/types/text_content.py similarity index 81% rename from src/cohere/v2/types/text_content.py rename to src/cohere/types/text_content.py index 4e3ade261..338597155 100644 --- a/src/cohere/v2/types/text_content.py +++ b/src/cohere/types/text_content.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing import pydantic diff --git a/src/cohere/v2/types/text_response_format2.py b/src/cohere/types/text_response_format_v2.py similarity index 70% rename from src/cohere/v2/types/text_response_format2.py rename to src/cohere/types/text_response_format_v2.py index 3a6866d58..2dc6310cb 100644 --- a/src/cohere/v2/types/text_response_format2.py +++ b/src/cohere/types/text_response_format_v2.py @@ -1,12 +1,12 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing import pydantic -class TextResponseFormat2(UncheckedBaseModel): +class TextResponseFormatV2(UncheckedBaseModel): if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/cohere/types/texts.py b/src/cohere/types/texts.py new file mode 100644 index 000000000..2f2d0978a --- /dev/null +++ b/src/cohere/types/texts.py @@ -0,0 +1,62 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from .embedding_type import EmbeddingType +from .texts_truncate import TextsTruncate +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class Texts(UncheckedBaseModel): + texts: typing.List[str] = pydantic.Field() + """ + An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality. 
+ """ + + model: str = pydantic.Field() + """ + Defaults to embed-english-v2.0 + + The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. + + Available models and corresponding embedding dimensions: + + - `embed-english-v3.0` 1024 + - `embed-multilingual-v3.0` 1024 + - `embed-english-light-v3.0` 384 + - `embed-multilingual-light-v3.0` 384 + + - `embed-english-v2.0` 4096 + - `embed-english-light-v2.0` 1024 + - `embed-multilingual-v2.0` 768 + """ + + embedding_types: typing.Optional[typing.List[EmbeddingType]] = pydantic.Field(default=None) + """ + Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. + + - `"float"`: Use this when you want to get back the default float embeddings. Valid for all models. + - `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models. + - `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models. + - `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models. + - `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models. + """ + + truncate: typing.Optional[TextsTruncate] = pydantic.Field(default=None) + """ + One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. + + Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + + If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/types/texts_truncate.py b/src/cohere/types/texts_truncate.py new file mode 100644 index 000000000..b0e2faf0e --- /dev/null +++ b/src/cohere/types/texts_truncate.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TextsTruncate = typing.Union[typing.Literal["NONE", "START", "END"], typing.Any] diff --git a/src/cohere/v2/types/tool_call2.py b/src/cohere/types/tool_call_v2.py similarity index 67% rename from src/cohere/v2/types/tool_call2.py rename to src/cohere/types/tool_call_v2.py index 2f3e0cd5a..e97171de7 100644 --- a/src/cohere/v2/types/tool_call2.py +++ b/src/cohere/types/tool_call_v2.py @@ -1,20 +1,20 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from .tool_call2function import ToolCall2Function -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from .tool_call_v2function import ToolCallV2Function +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -class ToolCall2(UncheckedBaseModel): +class ToolCallV2(UncheckedBaseModel): """ A array of tool calls to be made. 
""" id: typing.Optional[str] = None type: typing.Optional[typing.Literal["function"]] = None - function: typing.Optional[ToolCall2Function] = None + function: typing.Optional[ToolCallV2Function] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/cohere/v2/types/tool_call2function.py b/src/cohere/types/tool_call_v2function.py similarity index 74% rename from src/cohere/v2/types/tool_call2function.py rename to src/cohere/types/tool_call_v2function.py index 1adce7fcd..6e27943c9 100644 --- a/src/cohere/v2/types/tool_call2function.py +++ b/src/cohere/types/tool_call_v2function.py @@ -1,12 +1,12 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -class ToolCall2Function(UncheckedBaseModel): +class ToolCallV2Function(UncheckedBaseModel): name: typing.Optional[str] = None arguments: typing.Optional[str] = None diff --git a/src/cohere/types/tool_content.py b/src/cohere/types/tool_content.py new file mode 100644 index 000000000..8a599572d --- /dev/null +++ b/src/cohere/types/tool_content.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from .document import Document +import typing_extensions +from ..core.unchecked_base_model import UnionMetadata + + +class TextToolContent(UncheckedBaseModel): + """ + A content block which contains information about the content of a tool result + """ + + type: typing.Literal["text"] = "text" + text: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class DocumentToolContent(UncheckedBaseModel): + """ + A content block which contains information about the content of a tool result + """ + + type: typing.Literal["document"] = "document" + document: Document + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +ToolContent = typing_extensions.Annotated[ + typing.Union[TextToolContent, DocumentToolContent], UnionMetadata(discriminant="type") +] diff --git a/src/cohere/types/tool_message_v2.py b/src/cohere/types/tool_message_v2.py new file mode 100644 index 000000000..8040a3c4c --- /dev/null +++ b/src/cohere/types/tool_message_v2.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from .tool_message_v2tool_content import ToolMessageV2ToolContent +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class ToolMessageV2(UncheckedBaseModel): + """ + A message from the system. 
+ """ + + tool_call_id: str = pydantic.Field() + """ + The id of the associated tool call that has provided the given content + """ + + tool_content: ToolMessageV2ToolContent = pydantic.Field() + """ + A single or list of outputs from a tool. The content should formatted as a JSON object string, or a list of tool content blocks + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/types/tool_message_v2tool_content.py b/src/cohere/types/tool_message_v2tool_content.py new file mode 100644 index 000000000..e6d5655b4 --- /dev/null +++ b/src/cohere/types/tool_message_v2tool_content.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_content import ToolContent + +ToolMessageV2ToolContent = typing.Union[str, typing.List[ToolContent]] diff --git a/src/cohere/v2/types/tool_source.py b/src/cohere/types/tool_source.py similarity index 84% rename from src/cohere/v2/types/tool_source.py rename to src/cohere/types/tool_source.py index 772229531..1ece16490 100644 --- a/src/cohere/v2/types/tool_source.py +++ b/src/cohere/types/tool_source.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ToolSource(UncheckedBaseModel): diff --git a/src/cohere/types/tool_v2.py b/src/cohere/types/tool_v2.py new file mode 100644 index 000000000..c5ce92300 --- /dev/null +++ b/src/cohere/types/tool_v2.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .tool_v2function import ToolV2Function +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ToolV2(UncheckedBaseModel): + type: typing.Optional[typing.Literal["function"]] = None + function: typing.Optional[ToolV2Function] = pydantic.Field(default=None) + """ + The function to be executed. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/tool2function.py b/src/cohere/types/tool_v2function.py similarity index 84% rename from src/cohere/v2/types/tool2function.py rename to src/cohere/types/tool_v2function.py index cdc535826..a55d1e1bd 100644 --- a/src/cohere/v2/types/tool2function.py +++ b/src/cohere/types/tool_v2function.py @@ -1,12 +1,12 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 -class Tool2Function(UncheckedBaseModel): +class ToolV2Function(UncheckedBaseModel): """ The function to be executed. """ diff --git a/src/cohere/v2/types/usage.py b/src/cohere/types/usage.py similarity index 84% rename from src/cohere/v2/types/usage.py rename to src/cohere/types/usage.py index e99efbdfa..3e7cdf51e 100644 --- a/src/cohere/v2/types/usage.py +++ b/src/cohere/types/usage.py @@ -1,10 +1,10 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing from .usage_billed_units import UsageBilledUnits from .usage_tokens import UsageTokens -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic diff --git a/src/cohere/v2/types/usage_billed_units.py b/src/cohere/types/usage_billed_units.py similarity index 89% rename from src/cohere/v2/types/usage_billed_units.py rename to src/cohere/types/usage_billed_units.py index abe2925b9..bfb82fd95 100644 --- a/src/cohere/v2/types/usage_billed_units.py +++ b/src/cohere/types/usage_billed_units.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class UsageBilledUnits(UncheckedBaseModel): diff --git a/src/cohere/v2/types/usage_tokens.py b/src/cohere/types/usage_tokens.py similarity index 86% rename from src/cohere/v2/types/usage_tokens.py rename to src/cohere/types/usage_tokens.py index f10e583b6..c9b83d4ba 100644 --- a/src/cohere/v2/types/usage_tokens.py +++ b/src/cohere/types/usage_tokens.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel import typing import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class UsageTokens(UncheckedBaseModel): diff --git a/src/cohere/v2/types/user_message.py b/src/cohere/types/user_message.py similarity index 69% rename from src/cohere/v2/types/user_message.py rename to src/cohere/types/user_message.py index f73c47177..25339d292 100644 --- a/src/cohere/v2/types/user_message.py +++ b/src/cohere/types/user_message.py @@ -1,11 +1,10 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.unchecked_base_model import UncheckedBaseModel +from ..core.unchecked_base_model import UncheckedBaseModel from .user_message_content import UserMessageContent import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -from ...types.chat_document import ChatDocument -from ...core.pydantic_utilities import IS_PYDANTIC_V2 class UserMessage(UncheckedBaseModel): @@ -19,11 +18,6 @@ class UserMessage(UncheckedBaseModel): If a string is provided, it will be treated as a text content block. """ - documents: typing.Optional[typing.List[ChatDocument]] = pydantic.Field(default=None) - """ - Documents seen by the model when generating the reply. - """ - if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/cohere/v2/types/user_message_content.py b/src/cohere/types/user_message_content.py similarity index 100% rename from src/cohere/v2/types/user_message_content.py rename to src/cohere/types/user_message_content.py diff --git a/src/cohere/utils.py b/src/cohere/utils.py index 6d33559ae..de7ab65a3 100644 --- a/src/cohere/utils.py +++ b/src/cohere/utils.py @@ -8,7 +8,7 @@ import requests from fastavro import parse_schema, reader, writer -from . 
import EmbedResponse, EmbedResponse_EmbeddingsFloats, EmbedResponse_EmbeddingsByType, ApiMeta, \ +from . import EmbedResponse, EmbeddingsFloatsEmbedResponse, EmbeddingsByTypeEmbedResponse, ApiMeta, \ EmbedByTypeResponseEmbeddings, ApiMetaBilledUnits, EmbedJob, CreateEmbedJobResponse, Dataset from .datasets import DatasetsCreateResponse, DatasetsGetResponse from .overrides import get_fields @@ -194,7 +194,7 @@ def merge_embed_responses(responses: typing.List[EmbedResponse]) -> EmbedRespons ] if responses[0].response_type == "embeddings_floats": - embeddings_floats = typing.cast(typing.List[EmbedResponse_EmbeddingsFloats], responses) + embeddings_floats = typing.cast(typing.List[EmbeddingsFloatsEmbedResponse], responses) embeddings = [ embedding @@ -202,7 +202,7 @@ def merge_embed_responses(responses: typing.List[EmbedResponse]) -> EmbedRespons for embedding in embeddings_floats.embeddings ] - return EmbedResponse_EmbeddingsFloats( + return EmbeddingsFloatsEmbedResponse( response_type="embeddings_floats", id=response_id, texts=texts, @@ -210,7 +210,7 @@ def merge_embed_responses(responses: typing.List[EmbedResponse]) -> EmbedRespons meta=meta ) else: - embeddings_type = typing.cast(typing.List[EmbedResponse_EmbeddingsByType], responses) + embeddings_type = typing.cast(typing.List[EmbeddingsByTypeEmbedResponse], responses) embeddings_by_type = [ response.embeddings @@ -231,7 +231,7 @@ def merge_embed_responses(responses: typing.List[EmbedResponse]) -> EmbedRespons embeddings_by_type_merged = EmbedByTypeResponseEmbeddings.parse_obj(merged_dicts) - return EmbedResponse_EmbeddingsByType( + return EmbeddingsByTypeEmbedResponse( response_type="embeddings_by_type", id=response_id, embeddings=embeddings_by_type_merged, diff --git a/src/cohere/v2/__init__.py b/src/cohere/v2/__init__.py index 922af5e29..4a904803f 100644 --- a/src/cohere/v2/__init__.py +++ b/src/cohere/v2/__init__.py @@ -1,185 +1,23 @@ # This file was auto-generated by Fern from our API Definition. 
from .types import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentItem, - AssistantMessageContentItem_Text, - AssistantMessageResponse, - AssistantMessageResponseContentItem, - AssistantMessageResponseContentItem_Text, - ChatContentDeltaEvent, - ChatContentDeltaEventDelta, - ChatContentDeltaEventDeltaMessage, - ChatContentDeltaEventDeltaMessageContent, - ChatContentEndEvent, - ChatContentStartEvent, - ChatContentStartEventDelta, - ChatContentStartEventDeltaMessage, - ChatContentStartEventDeltaMessageContent, - ChatFinishReason, - ChatMessage2, - ChatMessage2_Assistant, - ChatMessage2_System, - ChatMessage2_Tool, - ChatMessage2_User, - ChatMessageEndEvent, - ChatMessageEndEventDelta, - ChatMessageStartEvent, - ChatMessageStartEventDelta, - ChatMessageStartEventDeltaMessage, - ChatMessages, - ChatStreamEventType, - ChatToolCallDeltaEvent, - ChatToolCallDeltaEventDelta, - ChatToolCallDeltaEventDeltaToolCall, - ChatToolCallDeltaEventDeltaToolCallFunction, - ChatToolCallEndEvent, - ChatToolCallStartEvent, - ChatToolCallStartEventDelta, - ChatToolCallStartEventDeltaToolCall, - ChatToolCallStartEventDeltaToolCallFunction, - ChatToolPlanDeltaEvent, - ChatToolPlanDeltaEventDelta, - Citation, - CitationEndEvent, - CitationStartEvent, - CitationStartEventDelta, - CitationStartEventDeltaMessage, - Content, - Content_Text, - DocumentSource, - JsonResponseFormat2, - NonStreamedChatResponse2, - ResponseFormat2, - ResponseFormat2_JsonObject, - ResponseFormat2_Text, - Source, - Source_Document, - Source_Tool, - StreamedChatResponse2, - StreamedChatResponse2_CitationEnd, - StreamedChatResponse2_CitationStart, - StreamedChatResponse2_ContentDelta, - StreamedChatResponse2_ContentEnd, - StreamedChatResponse2_ContentStart, - StreamedChatResponse2_MessageEnd, - StreamedChatResponse2_MessageStart, - StreamedChatResponse2_ToolCallDelta, - StreamedChatResponse2_ToolCallEnd, - StreamedChatResponse2_ToolCallStart, - StreamedChatResponse2_ToolPlanDelta, - 
SystemMessage, - SystemMessageContent, - SystemMessageContentItem, - SystemMessageContentItem_Text, - TextContent, - TextResponseFormat2, - Tool2, - Tool2Function, - ToolCall2, - ToolCall2Function, - ToolMessage2, - ToolSource, - Usage, - UsageBilledUnits, - UsageTokens, - UserMessage, - UserMessageContent, - V2ChatRequestCitationMode, + V2ChatRequestDocumentsItem, V2ChatRequestSafetyMode, - V2ChatStreamRequestCitationMode, + V2ChatStreamRequestDocumentsItem, V2ChatStreamRequestSafetyMode, + V2RerankRequestDocumentsItem, + V2RerankResponse, + V2RerankResponseResultsItem, + V2RerankResponseResultsItemDocument, ) __all__ = [ - "AssistantMessage", - "AssistantMessageContent", - "AssistantMessageContentItem", - "AssistantMessageContentItem_Text", - "AssistantMessageResponse", - "AssistantMessageResponseContentItem", - "AssistantMessageResponseContentItem_Text", - "ChatContentDeltaEvent", - "ChatContentDeltaEventDelta", - "ChatContentDeltaEventDeltaMessage", - "ChatContentDeltaEventDeltaMessageContent", - "ChatContentEndEvent", - "ChatContentStartEvent", - "ChatContentStartEventDelta", - "ChatContentStartEventDeltaMessage", - "ChatContentStartEventDeltaMessageContent", - "ChatFinishReason", - "ChatMessage2", - "ChatMessage2_Assistant", - "ChatMessage2_System", - "ChatMessage2_Tool", - "ChatMessage2_User", - "ChatMessageEndEvent", - "ChatMessageEndEventDelta", - "ChatMessageStartEvent", - "ChatMessageStartEventDelta", - "ChatMessageStartEventDeltaMessage", - "ChatMessages", - "ChatStreamEventType", - "ChatToolCallDeltaEvent", - "ChatToolCallDeltaEventDelta", - "ChatToolCallDeltaEventDeltaToolCall", - "ChatToolCallDeltaEventDeltaToolCallFunction", - "ChatToolCallEndEvent", - "ChatToolCallStartEvent", - "ChatToolCallStartEventDelta", - "ChatToolCallStartEventDeltaToolCall", - "ChatToolCallStartEventDeltaToolCallFunction", - "ChatToolPlanDeltaEvent", - "ChatToolPlanDeltaEventDelta", - "Citation", - "CitationEndEvent", - "CitationStartEvent", - "CitationStartEventDelta", - 
"CitationStartEventDeltaMessage", - "Content", - "Content_Text", - "DocumentSource", - "JsonResponseFormat2", - "NonStreamedChatResponse2", - "ResponseFormat2", - "ResponseFormat2_JsonObject", - "ResponseFormat2_Text", - "Source", - "Source_Document", - "Source_Tool", - "StreamedChatResponse2", - "StreamedChatResponse2_CitationEnd", - "StreamedChatResponse2_CitationStart", - "StreamedChatResponse2_ContentDelta", - "StreamedChatResponse2_ContentEnd", - "StreamedChatResponse2_ContentStart", - "StreamedChatResponse2_MessageEnd", - "StreamedChatResponse2_MessageStart", - "StreamedChatResponse2_ToolCallDelta", - "StreamedChatResponse2_ToolCallEnd", - "StreamedChatResponse2_ToolCallStart", - "StreamedChatResponse2_ToolPlanDelta", - "SystemMessage", - "SystemMessageContent", - "SystemMessageContentItem", - "SystemMessageContentItem_Text", - "TextContent", - "TextResponseFormat2", - "Tool2", - "Tool2Function", - "ToolCall2", - "ToolCall2Function", - "ToolMessage2", - "ToolSource", - "Usage", - "UsageBilledUnits", - "UsageTokens", - "UserMessage", - "UserMessageContent", - "V2ChatRequestCitationMode", + "V2ChatRequestDocumentsItem", "V2ChatRequestSafetyMode", - "V2ChatStreamRequestCitationMode", + "V2ChatStreamRequestDocumentsItem", "V2ChatStreamRequestSafetyMode", + "V2RerankRequestDocumentsItem", + "V2RerankResponse", + "V2RerankResponseResultsItem", + "V2RerankResponseResultsItemDocument", ] diff --git a/src/cohere/v2/client.py b/src/cohere/v2/client.py index 34340141f..b504acfb5 100644 --- a/src/cohere/v2/client.py +++ b/src/cohere/v2/client.py @@ -2,13 +2,14 @@ import typing from ..core.client_wrapper import SyncClientWrapper -from .types.chat_messages import ChatMessages -from .types.tool2 import Tool2 -from .types.v2chat_stream_request_citation_mode import V2ChatStreamRequestCitationMode -from .types.response_format2 import ResponseFormat2 +from ..types.chat_messages import ChatMessages +from ..types.tool_v2 import ToolV2 +from 
.types.v2chat_stream_request_documents_item import V2ChatStreamRequestDocumentsItem +from ..types.citation_options import CitationOptions +from ..types.response_format_v2 import ResponseFormatV2 from .types.v2chat_stream_request_safety_mode import V2ChatStreamRequestSafetyMode from ..core.request_options import RequestOptions -from .types.streamed_chat_response2 import StreamedChatResponse2 +from ..types.streamed_chat_response_v2 import StreamedChatResponseV2 from ..core.serialization import convert_and_respect_annotation_metadata import httpx_sse from ..core.unchecked_base_model import construct_type @@ -31,9 +32,13 @@ from ..types.gateway_timeout_error_body import GatewayTimeoutErrorBody from json.decoder import JSONDecodeError from ..core.api_error import ApiError -from .types.v2chat_request_citation_mode import V2ChatRequestCitationMode +from .types.v2chat_request_documents_item import V2ChatRequestDocumentsItem from .types.v2chat_request_safety_mode import V2ChatRequestSafetyMode -from .types.non_streamed_chat_response2 import NonStreamedChatResponse2 +from ..types.chat_response import ChatResponse +from ..types.embed_request_v2 import EmbedRequestV2 +from ..types.embed_by_type_response import EmbedByTypeResponse +from .types.v2rerank_request_documents_item import V2RerankRequestDocumentsItem +from .types.v2rerank_response import V2RerankResponse from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -49,9 +54,10 @@ def chat_stream( *, model: str, messages: ChatMessages, - tools: typing.Optional[typing.Sequence[Tool2]] = OMIT, - citation_mode: typing.Optional[V2ChatStreamRequestCitationMode] = OMIT, - response_format: typing.Optional[ResponseFormat2] = OMIT, + tools: typing.Optional[typing.Sequence[ToolV2]] = OMIT, + documents: typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]] = OMIT, + citation_options: typing.Optional[CitationOptions] = OMIT, + response_format: 
typing.Optional[ResponseFormatV2] = OMIT, safety_mode: typing.Optional[V2ChatStreamRequestSafetyMode] = OMIT, max_tokens: typing.Optional[int] = OMIT, stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT, @@ -63,7 +69,7 @@ def chat_stream( p: typing.Optional[float] = OMIT, return_prompt: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Iterator[StreamedChatResponse2]: + ) -> typing.Iterator[StreamedChatResponseV2]: """ Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides. @@ -74,18 +80,19 @@ def chat_stream( messages : ChatMessages - tools : typing.Optional[typing.Sequence[Tool2]] + tools : typing.Optional[typing.Sequence[ToolV2]] A list of available tools (functions) that the model may suggest invoking before producing a text response. When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - citation_mode : typing.Optional[V2ChatStreamRequestCitationMode] - Defaults to `"accurate"`. - Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + documents : typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]] + A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. 
- response_format : typing.Optional[ResponseFormat2] + citation_options : typing.Optional[CitationOptions] + + response_format : typing.Optional[ResponseFormatV2] safety_mode : typing.Optional[V2ChatStreamRequestSafetyMode] Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`. @@ -149,17 +156,18 @@ def chat_stream( Yields ------ - typing.Iterator[StreamedChatResponse2] + typing.Iterator[StreamedChatResponseV2] Examples -------- - from cohere import Client - from cohere.v2 import ( - ChatMessage2_User, - ResponseFormat2_Text, - Tool2, - Tool2Function, + from cohere import ( + CitationOptions, + Client, + TextResponseFormatV2, + ToolV2, + ToolV2Function, + UserChatMessageV2, ) client = Client( @@ -169,22 +177,24 @@ def chat_stream( response = client.v2.chat_stream( model="string", messages=[ - ChatMessage2_User( + UserChatMessageV2( content="string", - documents=[{"string": {"key": "value"}}], ) ], tools=[ - Tool2( - function=Tool2Function( + ToolV2( + function=ToolV2Function( name="string", description="string", parameters={"string": {"key": "value"}}, ), ) ], - citation_mode="FAST", - response_format=ResponseFormat2_Text(), + documents=["string"], + citation_options=CitationOptions( + mode="FAST", + ), + response_format=TextResponseFormatV2(), safety_mode="CONTEXTUAL", max_tokens=1, stop_sequences=["string"], @@ -208,11 +218,16 @@ def chat_stream( object_=messages, annotation=ChatMessages, direction="write" ), "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[Tool2], direction="write" + object_=tools, annotation=typing.Sequence[ToolV2], direction="write" + ), + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2ChatStreamRequestDocumentsItem], direction="write" + ), + "citation_options": convert_and_respect_annotation_metadata( + object_=citation_options, annotation=CitationOptions, direction="write" ), - 
"citation_mode": citation_mode, "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormat2, direction="write" + object_=response_format, annotation=ResponseFormatV2, direction="write" ), "safety_mode": safety_mode, "max_tokens": max_tokens, @@ -235,9 +250,9 @@ def chat_stream( for _sse in _event_source.iter_sse(): try: yield typing.cast( - StreamedChatResponse2, + StreamedChatResponseV2, construct_type( - type_=StreamedChatResponse2, # type: ignore + type_=StreamedChatResponseV2, # type: ignore object_=json.loads(_sse.data), ), ) @@ -365,9 +380,10 @@ def chat( *, model: str, messages: ChatMessages, - tools: typing.Optional[typing.Sequence[Tool2]] = OMIT, - citation_mode: typing.Optional[V2ChatRequestCitationMode] = OMIT, - response_format: typing.Optional[ResponseFormat2] = OMIT, + tools: typing.Optional[typing.Sequence[ToolV2]] = OMIT, + documents: typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]] = OMIT, + citation_options: typing.Optional[CitationOptions] = OMIT, + response_format: typing.Optional[ResponseFormatV2] = OMIT, safety_mode: typing.Optional[V2ChatRequestSafetyMode] = OMIT, max_tokens: typing.Optional[int] = OMIT, stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT, @@ -379,7 +395,7 @@ def chat( p: typing.Optional[float] = OMIT, return_prompt: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> NonStreamedChatResponse2: + ) -> ChatResponse: """ Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides. @@ -390,18 +406,19 @@ def chat( messages : ChatMessages - tools : typing.Optional[typing.Sequence[Tool2]] + tools : typing.Optional[typing.Sequence[ToolV2]] A list of available tools (functions) that the model may suggest invoking before producing a text response. 
When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - citation_mode : typing.Optional[V2ChatRequestCitationMode] - Defaults to `"accurate"`. - Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + documents : typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]] + A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. - response_format : typing.Optional[ResponseFormat2] + citation_options : typing.Optional[CitationOptions] + + response_format : typing.Optional[ResponseFormatV2] safety_mode : typing.Optional[V2ChatRequestSafetyMode] Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`. 
@@ -465,13 +482,12 @@ def chat( Returns ------- - NonStreamedChatResponse2 + ChatResponse Examples -------- - from cohere import Client - from cohere.v2 import ChatMessage2_Tool + from cohere import Client, ToolChatMessageV2 client = Client( client_name="YOUR_CLIENT_NAME", @@ -480,9 +496,9 @@ def chat( client.v2.chat( model="model", messages=[ - ChatMessage2_Tool( + ToolChatMessageV2( tool_call_id="messages", - tool_content=["messages"], + tool_content="messages", ) ], ) @@ -496,11 +512,16 @@ def chat( object_=messages, annotation=ChatMessages, direction="write" ), "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[Tool2], direction="write" + object_=tools, annotation=typing.Sequence[ToolV2], direction="write" + ), + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2ChatRequestDocumentsItem], direction="write" + ), + "citation_options": convert_and_respect_annotation_metadata( + object_=citation_options, annotation=CitationOptions, direction="write" ), - "citation_mode": citation_mode, "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormat2, direction="write" + object_=response_format, annotation=ResponseFormatV2, direction="write" ), "safety_mode": safety_mode, "max_tokens": max_tokens, @@ -520,9 +541,9 @@ def chat( try: if 200 <= _response.status_code < 300: return typing.cast( - NonStreamedChatResponse2, + ChatResponse, construct_type( - type_=NonStreamedChatResponse2, # type: ignore + type_=ChatResponse, # type: ignore object_=_response.json(), ), ) @@ -641,175 +662,857 @@ def chat( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def embed( + self, *, request: EmbedRequestV2, request_options: typing.Optional[RequestOptions] = None + ) -> EmbedByTypeResponse: + """ + This endpoint returns text embeddings. 
An embedding is a list of floating point numbers that captures semantic information about the text that it represents. -class AsyncV2Client: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. - async def chat_stream( - self, - *, - model: str, - messages: ChatMessages, - tools: typing.Optional[typing.Sequence[Tool2]] = OMIT, - citation_mode: typing.Optional[V2ChatStreamRequestCitationMode] = OMIT, - response_format: typing.Optional[ResponseFormat2] = OMIT, - safety_mode: typing.Optional[V2ChatStreamRequestSafetyMode] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT, - temperature: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - k: typing.Optional[float] = OMIT, - p: typing.Optional[float] = OMIT, - return_prompt: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.AsyncIterator[StreamedChatResponse2]: - """ - Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides. + If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). Parameters ---------- - model : str - The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model. - - messages : ChatMessages - - tools : typing.Optional[typing.Sequence[Tool2]] - A list of available tools (functions) that the model may suggest invoking before producing a text response. 
- - When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - - - citation_mode : typing.Optional[V2ChatStreamRequestCitationMode] - Defaults to `"accurate"`. - Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. - - - response_format : typing.Optional[ResponseFormat2] - - safety_mode : typing.Optional[V2ChatStreamRequestSafetyMode] - Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`. - When `NONE` is specified, the safety instruction will be omitted. - - Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters. - - **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer. - - Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments - - - max_tokens : typing.Optional[int] - The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. - - - stop_sequences : typing.Optional[typing.Sequence[str]] - A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. - - - temperature : typing.Optional[float] - Defaults to `0.3`. + request : EmbedRequestV2 - A non-negative float that tunes the degree of randomness in generation. 
Lower temperatures mean less random generations, and higher temperatures mean more random generations. + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - Randomness can be further maximized by increasing the value of the `p` parameter. + Returns + ------- + EmbedByTypeResponse + OK + Examples + -------- + from cohere import Client, ImageEmbedRequestV2 - seed : typing.Optional[int] - If specified, the backend will make a best effort to sample tokens - deterministically, such that repeated requests with the same - seed and parameters should return the same result. However, - determinism cannot be totally guaranteed. + client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + client.v2.embed( + request=ImageEmbedRequestV2( + images=["string"], + model="string", + ), + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v2/embed", + method="POST", + json=convert_and_respect_annotation_metadata(object_=request, annotation=EmbedRequestV2, direction="write"), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EmbedByTypeResponse, + construct_type( + type_=EmbedByTypeResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 401: + raise UnauthorizedError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + 
typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + UnprocessableEntityErrorBody, + construct_type( + type_=UnprocessableEntityErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + TooManyRequestsErrorBody, + construct_type( + type_=TooManyRequestsErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + typing.cast( + ClientClosedRequestErrorBody, + construct_type( + type_=ClientClosedRequestErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 500: + raise InternalServerError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 501: + raise NotImplementedError( + typing.cast( + NotImplementedErrorBody, + construct_type( + type_=NotImplementedErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + typing.cast( + GatewayTimeoutErrorBody, + construct_type( + type_=GatewayTimeoutErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def rerank( + self, + *, + model: str, + query: str, + documents: 
typing.Sequence[V2RerankRequestDocumentsItem], + top_n: typing.Optional[int] = OMIT, + rank_fields: typing.Optional[typing.Sequence[str]] = OMIT, + return_documents: typing.Optional[bool] = OMIT, + max_chunks_per_doc: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> V2RerankResponse: + """ + This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. - frequency_penalty : typing.Optional[float] - Defaults to `0.0`, min value of `0.0`, max value of `1.0`. - Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + Parameters + ---------- + model : str + The identifier of the model to use, one of : `rerank-english-v3.0`, `rerank-multilingual-v3.0`, `rerank-english-v2.0`, `rerank-multilingual-v2.0` + query : str + The search query - presence_penalty : typing.Optional[float] - Defaults to `0.0`, min value of `0.0`, max value of `1.0`. - Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + documents : typing.Sequence[V2RerankRequestDocumentsItem] + A list of document objects or strings to rerank. + If a document is provided the text fields is required and all other fields will be preserved in the response. + The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000. - k : typing.Optional[float] - Ensures only the top `k` most likely tokens are considered for generation at each step. - Defaults to `0`, min value of `0`, max value of `500`. + We recommend a maximum of 1,000 documents for optimal endpoint performance. 
+ top_n : typing.Optional[int] + The number of most relevant documents or indices to return, defaults to the length of the documents - p : typing.Optional[float] - Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`. - Defaults to `0.75`. min value of `0.01`, max value of `0.99`. + rank_fields : typing.Optional[typing.Sequence[str]] + If a JSON object is provided, you can specify which keys you would like to have considered for reranking. The model will rerank based on order of the fields passed in (i.e. rank_fields=['title','author','text'] will rerank using the values in title, author, text sequentially. If the length of title, author, and text exceeds the context length of the model, the chunking will not re-consider earlier fields). If not provided, the model will use the default text field for ranking. + return_documents : typing.Optional[bool] + - If false, returns results without the doc text - the api will return a list of {index, relevance score} where index is inferred from the list passed into the request. + - If true, returns results with the doc text passed in - the api will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request. - return_prompt : typing.Optional[bool] - Whether to return the prompt in the response. + max_chunks_per_doc : typing.Optional[int] + The maximum number of chunks to produce internally from a document request_options : typing.Optional[RequestOptions] Request-specific configuration. 
- Yields - ------ - typing.AsyncIterator[StreamedChatResponse2] - + Returns + ------- + V2RerankResponse + OK Examples -------- - import asyncio - - from cohere import AsyncClient - from cohere.v2 import ( - ChatMessage2_User, - ResponseFormat2_Text, - Tool2, - Tool2Function, - ) + from cohere import Client - client = AsyncClient( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) - - - async def main() -> None: - response = await client.v2.chat_stream( - model="string", - messages=[ - ChatMessage2_User( - content="string", - documents=[{"string": {"key": "value"}}], - ) - ], - tools=[ - Tool2( - function=Tool2Function( - name="string", - description="string", - parameters={"string": {"key": "value"}}, + client.v2.rerank( + model="model", + query="query", + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v2/rerank", + method="POST", + json={ + "model": model, + "query": query, + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2RerankRequestDocumentsItem], direction="write" + ), + "top_n": top_n, + "rank_fields": rank_fields, + "return_documents": return_documents, + "max_chunks_per_doc": max_chunks_per_doc, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + V2RerankResponse, + construct_type( + type_=V2RerankResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 401: + raise UnauthorizedError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 403: + raise ForbiddenError( + 
typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + UnprocessableEntityErrorBody, + construct_type( + type_=UnprocessableEntityErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + TooManyRequestsErrorBody, + construct_type( + type_=TooManyRequestsErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + typing.cast( + ClientClosedRequestErrorBody, + construct_type( + type_=ClientClosedRequestErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 500: + raise InternalServerError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 501: + raise NotImplementedError( + typing.cast( + NotImplementedErrorBody, + construct_type( + type_=NotImplementedErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + typing.cast( + GatewayTimeoutErrorBody, + construct_type( + type_=GatewayTimeoutErrorBody, # type: ignore + object_=_response.json(), ), ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise 
ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncV2Client: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def chat_stream( + self, + *, + model: str, + messages: ChatMessages, + tools: typing.Optional[typing.Sequence[ToolV2]] = OMIT, + documents: typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]] = OMIT, + citation_options: typing.Optional[CitationOptions] = OMIT, + response_format: typing.Optional[ResponseFormatV2] = OMIT, + safety_mode: typing.Optional[V2ChatStreamRequestSafetyMode] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT, + temperature: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + k: typing.Optional[float] = OMIT, + p: typing.Optional[float] = OMIT, + return_prompt: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[StreamedChatResponseV2]: + """ + Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides. + + Parameters + ---------- + model : str + The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model. + + messages : ChatMessages + + tools : typing.Optional[typing.Sequence[ToolV2]] + A list of available tools (functions) that the model may suggest invoking before producing a text response. 
+ + When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. + + + documents : typing.Optional[typing.Sequence[V2ChatStreamRequestDocumentsItem]] + A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. + + + citation_options : typing.Optional[CitationOptions] + + response_format : typing.Optional[ResponseFormatV2] + + safety_mode : typing.Optional[V2ChatStreamRequestSafetyMode] + Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`. + When `NONE` is specified, the safety instruction will be omitted. + + Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters. + + **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer. + + Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + + + max_tokens : typing.Optional[int] + The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. + + + stop_sequences : typing.Optional[typing.Sequence[str]] + A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. + + + temperature : typing.Optional[float] + Defaults to `0.3`. + + A non-negative float that tunes the degree of randomness in generation. 
Lower temperatures mean less random generations, and higher temperatures mean more random generations. + + Randomness can be further maximized by increasing the value of the `p` parameter. + + + seed : typing.Optional[int] + If specified, the backend will make a best effort to sample tokens + deterministically, such that repeated requests with the same + seed and parameters should return the same result. However, + determinism cannot be totally guaranteed. + + + frequency_penalty : typing.Optional[float] + Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + + + presence_penalty : typing.Optional[float] + Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + + + k : typing.Optional[float] + Ensures only the top `k` most likely tokens are considered for generation at each step. + Defaults to `0`, min value of `0`, max value of `500`. + + + p : typing.Optional[float] + Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`. + Defaults to `0.75`. min value of `0.01`, max value of `0.99`. + + + return_prompt : typing.Optional[bool] + Whether to return the prompt in the response. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Yields + ------ + typing.AsyncIterator[StreamedChatResponseV2] + + + Examples + -------- + import asyncio + + from cohere import ( + AsyncClient, + CitationOptions, + TextResponseFormatV2, + ToolV2, + ToolV2Function, + UserChatMessageV2, + ) + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + + + async def main() -> None: + response = await client.v2.chat_stream( + model="string", + messages=[ + UserChatMessageV2( + content="string", + ) + ], + tools=[ + ToolV2( + function=ToolV2Function( + name="string", + description="string", + parameters={"string": {"key": "value"}}, + ), + ) + ], + documents=["string"], + citation_options=CitationOptions( + mode="FAST", + ), + response_format=TextResponseFormatV2(), + safety_mode="CONTEXTUAL", + max_tokens=1, + stop_sequences=["string"], + temperature=1.1, + seed=1, + frequency_penalty=1.1, + presence_penalty=1.1, + k=1.1, + p=1.1, + return_prompt=True, + ) + async for chunk in response: + yield chunk + + + asyncio.run(main()) + """ + async with self._client_wrapper.httpx_client.stream( + "v2/chat", + method="POST", + json={ + "model": model, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=ChatMessages, direction="write" + ), + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[ToolV2], direction="write" + ), + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2ChatStreamRequestDocumentsItem], direction="write" + ), + "citation_options": convert_and_respect_annotation_metadata( + object_=citation_options, annotation=CitationOptions, direction="write" + ), + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatV2, direction="write" + ), + "safety_mode": safety_mode, + "max_tokens": max_tokens, + "stop_sequences": stop_sequences, + "temperature": temperature, + "seed": seed, + "frequency_penalty": 
frequency_penalty, + "presence_penalty": presence_penalty, + "k": k, + "p": p, + "return_prompt": return_prompt, + "stream": True, + }, + request_options=request_options, + omit=OMIT, + ) as _response: + try: + if 200 <= _response.status_code < 300: + _event_source = httpx_sse.EventSource(_response) + async for _sse in _event_source.aiter_sse(): + try: + yield typing.cast( + StreamedChatResponseV2, + construct_type( + type_=StreamedChatResponseV2, # type: ignore + object_=json.loads(_sse.data), + ), + ) + except: + pass + return + await _response.aread() + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 401: + raise UnauthorizedError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + UnprocessableEntityErrorBody, + construct_type( + type_=UnprocessableEntityErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + TooManyRequestsErrorBody, + construct_type( + type_=TooManyRequestsErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + typing.cast( + ClientClosedRequestErrorBody, + 
construct_type( + type_=ClientClosedRequestErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 500: + raise InternalServerError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 501: + raise NotImplementedError( + typing.cast( + NotImplementedErrorBody, + construct_type( + type_=NotImplementedErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + typing.cast( + GatewayTimeoutErrorBody, + construct_type( + type_=GatewayTimeoutErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def chat( + self, + *, + model: str, + messages: ChatMessages, + tools: typing.Optional[typing.Sequence[ToolV2]] = OMIT, + documents: typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]] = OMIT, + citation_options: typing.Optional[CitationOptions] = OMIT, + response_format: typing.Optional[ResponseFormatV2] = OMIT, + safety_mode: typing.Optional[V2ChatRequestSafetyMode] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT, + temperature: typing.Optional[float] = OMIT, + seed: typing.Optional[int] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + k: typing.Optional[float] = OMIT, + p: typing.Optional[float] = OMIT, + return_prompt: typing.Optional[bool] = 
OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ChatResponse: + """ + Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides. + + Parameters + ---------- + model : str + The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model. + + messages : ChatMessages + + tools : typing.Optional[typing.Sequence[ToolV2]] + A list of available tools (functions) that the model may suggest invoking before producing a text response. + + When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. + + + documents : typing.Optional[typing.Sequence[V2ChatRequestDocumentsItem]] + A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata. + + + citation_options : typing.Optional[CitationOptions] + + response_format : typing.Optional[ResponseFormatV2] + + safety_mode : typing.Optional[V2ChatRequestSafetyMode] + Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`. + When `NONE` is specified, the safety instruction will be omitted. + + Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters. + + **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer. 
+ + Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments + + + max_tokens : typing.Optional[int] + The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. + + + stop_sequences : typing.Optional[typing.Sequence[str]] + A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. + + + temperature : typing.Optional[float] + Defaults to `0.3`. + + A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. + + Randomness can be further maximized by increasing the value of the `p` parameter. + + + seed : typing.Optional[int] + If specified, the backend will make a best effort to sample tokens + deterministically, such that repeated requests with the same + seed and parameters should return the same result. However, + determinism cannot be totally guaranteed. + + + frequency_penalty : typing.Optional[float] + Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + + + presence_penalty : typing.Optional[float] + Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + + + k : typing.Optional[float] + Ensures only the top `k` most likely tokens are considered for generation at each step. 
+ Defaults to `0`, min value of `0`, max value of `500`. + + + p : typing.Optional[float] + Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`. + Defaults to `0.75`. min value of `0.01`, max value of `0.99`. + + + return_prompt : typing.Optional[bool] + Whether to return the prompt in the response. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ChatResponse + + + Examples + -------- + import asyncio + + from cohere import AsyncClient, ToolChatMessageV2 + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.v2.chat( + model="model", + messages=[ + ToolChatMessageV2( + tool_call_id="messages", + tool_content="messages", + ) ], - citation_mode="FAST", - response_format=ResponseFormat2_Text(), - safety_mode="CONTEXTUAL", - max_tokens=1, - stop_sequences=["string"], - temperature=1.1, - seed=1, - frequency_penalty=1.1, - presence_penalty=1.1, - k=1.1, - p=1.1, - return_prompt=True, ) - async for chunk in response: - yield chunk asyncio.run(main()) """ - async with self._client_wrapper.httpx_client.stream( + _response = await self._client_wrapper.httpx_client.request( "v2/chat", method="POST", json={ @@ -818,11 +1521,16 @@ async def main() -> None: object_=messages, annotation=ChatMessages, direction="write" ), "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[Tool2], direction="write" + object_=tools, annotation=typing.Sequence[ToolV2], direction="write" + ), + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2ChatRequestDocumentsItem], direction="write" + ), + "citation_options": convert_and_respect_annotation_metadata( + object_=citation_options, annotation=CitationOptions, direction="write" ), - 
"citation_mode": citation_mode, "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormat2, direction="write" + object_=response_format, annotation=ResponseFormatV2, direction="write" ), "safety_mode": safety_mode, "max_tokens": max_tokens, @@ -834,256 +1542,368 @@ async def main() -> None: "k": k, "p": p, "return_prompt": return_prompt, - "stream": True, + "stream": False, }, request_options=request_options, omit=OMIT, - ) as _response: - try: - if 200 <= _response.status_code < 300: - _event_source = httpx_sse.EventSource(_response) - async for _sse in _event_source.aiter_sse(): - try: - yield typing.cast( - StreamedChatResponse2, - construct_type( - type_=StreamedChatResponse2, # type: ignore - object_=json.loads(_sse.data), - ), - ) - except: - pass - return - await _response.aread() - if _response.status_code == 400: - raise BadRequestError( - typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ChatResponse, + construct_type( + type_=ChatResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 401: - raise UnauthorizedError( - typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 401: + raise UnauthorizedError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 403: - raise ForbiddenError( - typing.cast( - typing.Optional[typing.Any], - 
construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + UnprocessableEntityErrorBody, + construct_type( + type_=UnprocessableEntityErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + TooManyRequestsErrorBody, + construct_type( + type_=TooManyRequestsErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + typing.cast( + ClientClosedRequestErrorBody, + construct_type( + type_=ClientClosedRequestErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 500: + raise InternalServerError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 501: + raise NotImplementedError( + typing.cast( + NotImplementedErrorBody, + construct_type( + type_=NotImplementedErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + typing.cast( + GatewayTimeoutErrorBody, + construct_type( + 
type_=GatewayTimeoutErrorBody, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def embed( + self, *, request: EmbedRequestV2, request_options: typing.Optional[RequestOptions] = None + ) -> EmbedByTypeResponse: + """ + This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. + + Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. + + If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). + + Parameters + ---------- + request : EmbedRequestV2 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EmbedByTypeResponse + OK + + Examples + -------- + import asyncio + + from cohere import AsyncClient, ImageEmbedRequestV2 + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + + + async def main() -> None: + await client.v2.embed( + request=ImageEmbedRequestV2( + images=["string"], + model="string", + ), + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v2/embed", + method="POST", + json=convert_and_respect_annotation_metadata(object_=request, annotation=EmbedRequestV2, direction="write"), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EmbedByTypeResponse, + construct_type( + type_=EmbedByTypeResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 401: + raise UnauthorizedError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 404: - raise NotFoundError( - typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 422: - raise UnprocessableEntityError( - 
typing.cast( - UnprocessableEntityErrorBody, - construct_type( - type_=UnprocessableEntityErrorBody, # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + UnprocessableEntityErrorBody, + construct_type( + type_=UnprocessableEntityErrorBody, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - TooManyRequestsErrorBody, - construct_type( - type_=TooManyRequestsErrorBody, # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + TooManyRequestsErrorBody, + construct_type( + type_=TooManyRequestsErrorBody, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 499: - raise ClientClosedRequestError( - typing.cast( - ClientClosedRequestErrorBody, - construct_type( - type_=ClientClosedRequestErrorBody, # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 499: + raise ClientClosedRequestError( + typing.cast( + ClientClosedRequestErrorBody, + construct_type( + type_=ClientClosedRequestErrorBody, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 500: - raise InternalServerError( - typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 500: + raise InternalServerError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 501: - raise NotImplementedError( - typing.cast( - NotImplementedErrorBody, - construct_type( - type_=NotImplementedErrorBody, # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 501: + raise NotImplementedError( + typing.cast( + 
NotImplementedErrorBody, + construct_type( + type_=NotImplementedErrorBody, # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 503: - raise ServiceUnavailableError( - typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 503: + raise ServiceUnavailableError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), ) - if _response.status_code == 504: - raise GatewayTimeoutError( - typing.cast( - GatewayTimeoutErrorBody, - construct_type( - type_=GatewayTimeoutErrorBody, # type: ignore - object_=_response.json(), - ), - ) + ) + if _response.status_code == 504: + raise GatewayTimeoutError( + typing.cast( + GatewayTimeoutErrorBody, + construct_type( + type_=GatewayTimeoutErrorBody, # type: ignore + object_=_response.json(), + ), ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) - async def chat( + async def rerank( self, *, model: str, - messages: ChatMessages, - tools: typing.Optional[typing.Sequence[Tool2]] = OMIT, - citation_mode: typing.Optional[V2ChatRequestCitationMode] = OMIT, - response_format: typing.Optional[ResponseFormat2] = OMIT, - safety_mode: typing.Optional[V2ChatRequestSafetyMode] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT, - temperature: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - 
presence_penalty: typing.Optional[float] = OMIT, - k: typing.Optional[float] = OMIT, - p: typing.Optional[float] = OMIT, - return_prompt: typing.Optional[bool] = OMIT, + query: str, + documents: typing.Sequence[V2RerankRequestDocumentsItem], + top_n: typing.Optional[int] = OMIT, + rank_fields: typing.Optional[typing.Sequence[str]] = OMIT, + return_documents: typing.Optional[bool] = OMIT, + max_chunks_per_doc: typing.Optional[int] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> NonStreamedChatResponse2: + ) -> V2RerankResponse: """ - Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides. + This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. Parameters ---------- model : str - The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model. - - messages : ChatMessages - - tools : typing.Optional[typing.Sequence[Tool2]] - A list of available tools (functions) that the model may suggest invoking before producing a text response. - - When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - - - citation_mode : typing.Optional[V2ChatRequestCitationMode] - Defaults to `"accurate"`. - Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. 
- - - response_format : typing.Optional[ResponseFormat2] - - safety_mode : typing.Optional[V2ChatRequestSafetyMode] - Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`. - When `NONE` is specified, the safety instruction will be omitted. - - Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters. - - **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer. - - Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments - - - max_tokens : typing.Optional[int] - The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. - - - stop_sequences : typing.Optional[typing.Sequence[str]] - A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence. - - - temperature : typing.Optional[float] - Defaults to `0.3`. - - A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. - - Randomness can be further maximized by increasing the value of the `p` parameter. - - - seed : typing.Optional[int] - If specified, the backend will make a best effort to sample tokens - deterministically, such that repeated requests with the same - seed and parameters should return the same result. However, - determinism cannot be totally guaranteed. 
+ The identifier of the model to use, one of : `rerank-english-v3.0`, `rerank-multilingual-v3.0`, `rerank-english-v2.0`, `rerank-multilingual-v2.0` + query : str + The search query - frequency_penalty : typing.Optional[float] - Defaults to `0.0`, min value of `0.0`, max value of `1.0`. - Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. - - - presence_penalty : typing.Optional[float] - Defaults to `0.0`, min value of `0.0`, max value of `1.0`. - Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + documents : typing.Sequence[V2RerankRequestDocumentsItem] + A list of document objects or strings to rerank. + If a document is provided the text fields is required and all other fields will be preserved in the response. + The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000. - k : typing.Optional[float] - Ensures only the top `k` most likely tokens are considered for generation at each step. - Defaults to `0`, min value of `0`, max value of `500`. + We recommend a maximum of 1,000 documents for optimal endpoint performance. + top_n : typing.Optional[int] + The number of most relevant documents or indices to return, defaults to the length of the documents - p : typing.Optional[float] - Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`. - Defaults to `0.75`. min value of `0.01`, max value of `0.99`. + rank_fields : typing.Optional[typing.Sequence[str]] + If a JSON object is provided, you can specify which keys you would like to have considered for reranking. 
The model will rerank based on order of the fields passed in (i.e. rank_fields=['title','author','text'] will rerank using the values in title, author, text sequentially. If the length of title, author, and text exceeds the context length of the model, the chunking will not re-consider earlier fields). If not provided, the model will use the default text field for ranking. + return_documents : typing.Optional[bool] + - If false, returns results without the doc text - the api will return a list of {index, relevance score} where index is inferred from the list passed into the request. + - If true, returns results with the doc text passed in - the api will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request. - return_prompt : typing.Optional[bool] - Whether to return the prompt in the response. + max_chunks_per_doc : typing.Optional[int] + The maximum number of chunks to produce internally from a document request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - NonStreamedChatResponse2 - + V2RerankResponse + OK Examples -------- import asyncio from cohere import AsyncClient - from cohere.v2 import ChatMessage2_Tool client = AsyncClient( client_name="YOUR_CLIENT_NAME", @@ -1092,45 +1912,28 @@ async def chat( async def main() -> None: - await client.v2.chat( + await client.v2.rerank( model="model", - messages=[ - ChatMessage2_Tool( - tool_call_id="messages", - tool_content=["messages"], - ) - ], + query="query", + documents=["documents"], ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v2/chat", + "v2/rerank", method="POST", json={ "model": model, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=ChatMessages, direction="write" - ), - "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[Tool2], direction="write" - ), - "citation_mode": citation_mode, - "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormat2, direction="write" + "query": query, + "documents": convert_and_respect_annotation_metadata( + object_=documents, annotation=typing.Sequence[V2RerankRequestDocumentsItem], direction="write" ), - "safety_mode": safety_mode, - "max_tokens": max_tokens, - "stop_sequences": stop_sequences, - "temperature": temperature, - "seed": seed, - "frequency_penalty": frequency_penalty, - "presence_penalty": presence_penalty, - "k": k, - "p": p, - "return_prompt": return_prompt, - "stream": False, + "top_n": top_n, + "rank_fields": rank_fields, + "return_documents": return_documents, + "max_chunks_per_doc": max_chunks_per_doc, }, request_options=request_options, omit=OMIT, @@ -1138,9 +1941,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - NonStreamedChatResponse2, + V2RerankResponse, construct_type( - type_=NonStreamedChatResponse2, # type: ignore + type_=V2RerankResponse, # type: ignore 
object_=_response.json(), ), ) diff --git a/src/cohere/v2/types/__init__.py b/src/cohere/v2/types/__init__.py index 33038791e..529151db8 100644 --- a/src/cohere/v2/types/__init__.py +++ b/src/cohere/v2/types/__init__.py @@ -1,182 +1,21 @@ # This file was auto-generated by Fern from our API Definition. -from .assistant_message import AssistantMessage -from .assistant_message_content import AssistantMessageContent -from .assistant_message_content_item import AssistantMessageContentItem, AssistantMessageContentItem_Text -from .assistant_message_response import AssistantMessageResponse -from .assistant_message_response_content_item import ( - AssistantMessageResponseContentItem, - AssistantMessageResponseContentItem_Text, -) -from .chat_content_delta_event import ChatContentDeltaEvent -from .chat_content_delta_event_delta import ChatContentDeltaEventDelta -from .chat_content_delta_event_delta_message import ChatContentDeltaEventDeltaMessage -from .chat_content_delta_event_delta_message_content import ChatContentDeltaEventDeltaMessageContent -from .chat_content_end_event import ChatContentEndEvent -from .chat_content_start_event import ChatContentStartEvent -from .chat_content_start_event_delta import ChatContentStartEventDelta -from .chat_content_start_event_delta_message import ChatContentStartEventDeltaMessage -from .chat_content_start_event_delta_message_content import ChatContentStartEventDeltaMessageContent -from .chat_finish_reason import ChatFinishReason -from .chat_message2 import ( - ChatMessage2, - ChatMessage2_Assistant, - ChatMessage2_System, - ChatMessage2_Tool, - ChatMessage2_User, -) -from .chat_message_end_event import ChatMessageEndEvent -from .chat_message_end_event_delta import ChatMessageEndEventDelta -from .chat_message_start_event import ChatMessageStartEvent -from .chat_message_start_event_delta import ChatMessageStartEventDelta -from .chat_message_start_event_delta_message import ChatMessageStartEventDeltaMessage -from .chat_messages import 
ChatMessages -from .chat_stream_event_type import ChatStreamEventType -from .chat_tool_call_delta_event import ChatToolCallDeltaEvent -from .chat_tool_call_delta_event_delta import ChatToolCallDeltaEventDelta -from .chat_tool_call_delta_event_delta_tool_call import ChatToolCallDeltaEventDeltaToolCall -from .chat_tool_call_delta_event_delta_tool_call_function import ChatToolCallDeltaEventDeltaToolCallFunction -from .chat_tool_call_end_event import ChatToolCallEndEvent -from .chat_tool_call_start_event import ChatToolCallStartEvent -from .chat_tool_call_start_event_delta import ChatToolCallStartEventDelta -from .chat_tool_call_start_event_delta_tool_call import ChatToolCallStartEventDeltaToolCall -from .chat_tool_call_start_event_delta_tool_call_function import ChatToolCallStartEventDeltaToolCallFunction -from .chat_tool_plan_delta_event import ChatToolPlanDeltaEvent -from .chat_tool_plan_delta_event_delta import ChatToolPlanDeltaEventDelta -from .citation import Citation -from .citation_end_event import CitationEndEvent -from .citation_start_event import CitationStartEvent -from .citation_start_event_delta import CitationStartEventDelta -from .citation_start_event_delta_message import CitationStartEventDeltaMessage -from .content import Content, Content_Text -from .document_source import DocumentSource -from .json_response_format2 import JsonResponseFormat2 -from .non_streamed_chat_response2 import NonStreamedChatResponse2 -from .response_format2 import ResponseFormat2, ResponseFormat2_JsonObject, ResponseFormat2_Text -from .source import Source, Source_Document, Source_Tool -from .streamed_chat_response2 import ( - StreamedChatResponse2, - StreamedChatResponse2_CitationEnd, - StreamedChatResponse2_CitationStart, - StreamedChatResponse2_ContentDelta, - StreamedChatResponse2_ContentEnd, - StreamedChatResponse2_ContentStart, - StreamedChatResponse2_MessageEnd, - StreamedChatResponse2_MessageStart, - StreamedChatResponse2_ToolCallDelta, - 
StreamedChatResponse2_ToolCallEnd, - StreamedChatResponse2_ToolCallStart, - StreamedChatResponse2_ToolPlanDelta, -) -from .system_message import SystemMessage -from .system_message_content import SystemMessageContent -from .system_message_content_item import SystemMessageContentItem, SystemMessageContentItem_Text -from .text_content import TextContent -from .text_response_format2 import TextResponseFormat2 -from .tool2 import Tool2 -from .tool2function import Tool2Function -from .tool_call2 import ToolCall2 -from .tool_call2function import ToolCall2Function -from .tool_message2 import ToolMessage2 -from .tool_source import ToolSource -from .usage import Usage -from .usage_billed_units import UsageBilledUnits -from .usage_tokens import UsageTokens -from .user_message import UserMessage -from .user_message_content import UserMessageContent -from .v2chat_request_citation_mode import V2ChatRequestCitationMode +from .v2chat_request_documents_item import V2ChatRequestDocumentsItem from .v2chat_request_safety_mode import V2ChatRequestSafetyMode -from .v2chat_stream_request_citation_mode import V2ChatStreamRequestCitationMode +from .v2chat_stream_request_documents_item import V2ChatStreamRequestDocumentsItem from .v2chat_stream_request_safety_mode import V2ChatStreamRequestSafetyMode +from .v2rerank_request_documents_item import V2RerankRequestDocumentsItem +from .v2rerank_response import V2RerankResponse +from .v2rerank_response_results_item import V2RerankResponseResultsItem +from .v2rerank_response_results_item_document import V2RerankResponseResultsItemDocument __all__ = [ - "AssistantMessage", - "AssistantMessageContent", - "AssistantMessageContentItem", - "AssistantMessageContentItem_Text", - "AssistantMessageResponse", - "AssistantMessageResponseContentItem", - "AssistantMessageResponseContentItem_Text", - "ChatContentDeltaEvent", - "ChatContentDeltaEventDelta", - "ChatContentDeltaEventDeltaMessage", - "ChatContentDeltaEventDeltaMessageContent", - 
"ChatContentEndEvent", - "ChatContentStartEvent", - "ChatContentStartEventDelta", - "ChatContentStartEventDeltaMessage", - "ChatContentStartEventDeltaMessageContent", - "ChatFinishReason", - "ChatMessage2", - "ChatMessage2_Assistant", - "ChatMessage2_System", - "ChatMessage2_Tool", - "ChatMessage2_User", - "ChatMessageEndEvent", - "ChatMessageEndEventDelta", - "ChatMessageStartEvent", - "ChatMessageStartEventDelta", - "ChatMessageStartEventDeltaMessage", - "ChatMessages", - "ChatStreamEventType", - "ChatToolCallDeltaEvent", - "ChatToolCallDeltaEventDelta", - "ChatToolCallDeltaEventDeltaToolCall", - "ChatToolCallDeltaEventDeltaToolCallFunction", - "ChatToolCallEndEvent", - "ChatToolCallStartEvent", - "ChatToolCallStartEventDelta", - "ChatToolCallStartEventDeltaToolCall", - "ChatToolCallStartEventDeltaToolCallFunction", - "ChatToolPlanDeltaEvent", - "ChatToolPlanDeltaEventDelta", - "Citation", - "CitationEndEvent", - "CitationStartEvent", - "CitationStartEventDelta", - "CitationStartEventDeltaMessage", - "Content", - "Content_Text", - "DocumentSource", - "JsonResponseFormat2", - "NonStreamedChatResponse2", - "ResponseFormat2", - "ResponseFormat2_JsonObject", - "ResponseFormat2_Text", - "Source", - "Source_Document", - "Source_Tool", - "StreamedChatResponse2", - "StreamedChatResponse2_CitationEnd", - "StreamedChatResponse2_CitationStart", - "StreamedChatResponse2_ContentDelta", - "StreamedChatResponse2_ContentEnd", - "StreamedChatResponse2_ContentStart", - "StreamedChatResponse2_MessageEnd", - "StreamedChatResponse2_MessageStart", - "StreamedChatResponse2_ToolCallDelta", - "StreamedChatResponse2_ToolCallEnd", - "StreamedChatResponse2_ToolCallStart", - "StreamedChatResponse2_ToolPlanDelta", - "SystemMessage", - "SystemMessageContent", - "SystemMessageContentItem", - "SystemMessageContentItem_Text", - "TextContent", - "TextResponseFormat2", - "Tool2", - "Tool2Function", - "ToolCall2", - "ToolCall2Function", - "ToolMessage2", - "ToolSource", - "Usage", - 
"UsageBilledUnits", - "UsageTokens", - "UserMessage", - "UserMessageContent", - "V2ChatRequestCitationMode", + "V2ChatRequestDocumentsItem", "V2ChatRequestSafetyMode", - "V2ChatStreamRequestCitationMode", + "V2ChatStreamRequestDocumentsItem", "V2ChatStreamRequestSafetyMode", + "V2RerankRequestDocumentsItem", + "V2RerankResponse", + "V2RerankResponseResultsItem", + "V2RerankResponseResultsItemDocument", ] diff --git a/src/cohere/v2/types/chat_messages.py b/src/cohere/v2/types/chat_messages.py deleted file mode 100644 index 5daf64301..000000000 --- a/src/cohere/v2/types/chat_messages.py +++ /dev/null @@ -1,6 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from .chat_message2 import ChatMessage2 - -ChatMessages = typing.List[ChatMessage2] diff --git a/src/cohere/v2/types/v2chat_request_citation_mode.py b/src/cohere/v2/types/v2chat_request_citation_mode.py deleted file mode 100644 index 9b5dc7d5a..000000000 --- a/src/cohere/v2/types/v2chat_request_citation_mode.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -V2ChatRequestCitationMode = typing.Union[typing.Literal["FAST", "ACCURATE", "OFF"], typing.Any] diff --git a/src/cohere/v2/types/v2chat_request_documents_item.py b/src/cohere/v2/types/v2chat_request_documents_item.py new file mode 100644 index 000000000..c8a838dbc --- /dev/null +++ b/src/cohere/v2/types/v2chat_request_documents_item.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ...types.document import Document + +V2ChatRequestDocumentsItem = typing.Union[str, Document] diff --git a/src/cohere/v2/types/v2chat_stream_request_citation_mode.py b/src/cohere/v2/types/v2chat_stream_request_citation_mode.py deleted file mode 100644 index 2e07c9ebe..000000000 --- a/src/cohere/v2/types/v2chat_stream_request_citation_mode.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -V2ChatStreamRequestCitationMode = typing.Union[typing.Literal["FAST", "ACCURATE", "OFF"], typing.Any] diff --git a/src/cohere/v2/types/v2chat_stream_request_documents_item.py b/src/cohere/v2/types/v2chat_stream_request_documents_item.py new file mode 100644 index 000000000..31b1f4a3f --- /dev/null +++ b/src/cohere/v2/types/v2chat_stream_request_documents_item.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.document import Document + +V2ChatStreamRequestDocumentsItem = typing.Union[str, Document] diff --git a/src/cohere/v2/types/v2rerank_request_documents_item.py b/src/cohere/v2/types/v2rerank_request_documents_item.py new file mode 100644 index 000000000..d66b77dc4 --- /dev/null +++ b/src/cohere/v2/types/v2rerank_request_documents_item.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.rerank_document import RerankDocument + +V2RerankRequestDocumentsItem = typing.Union[str, RerankDocument] diff --git a/src/cohere/v2/types/tool_message2.py b/src/cohere/v2/types/v2rerank_response.py similarity index 59% rename from src/cohere/v2/types/tool_message2.py rename to src/cohere/v2/types/v2rerank_response.py index 515226d72..54583df46 100644 --- a/src/cohere/v2/types/tool_message2.py +++ b/src/cohere/v2/types/v2rerank_response.py @@ -1,25 +1,21 @@ # This file was auto-generated by Fern from our API Definition. 
from ...core.unchecked_base_model import UncheckedBaseModel -import pydantic import typing +from .v2rerank_response_results_item import V2RerankResponseResultsItem +import pydantic +from ...types.api_meta import ApiMeta from ...core.pydantic_utilities import IS_PYDANTIC_V2 -class ToolMessage2(UncheckedBaseModel): +class V2RerankResponse(UncheckedBaseModel): + id: typing.Optional[str] = None + results: typing.List[V2RerankResponseResultsItem] = pydantic.Field() """ - A message from the system. + An ordered list of ranked documents """ - tool_call_id: str = pydantic.Field() - """ - The id of the associated tool call that has provided the given content - """ - - tool_content: typing.List[str] = pydantic.Field() - """ - A list of outputs from a tool. The content should formatted as a JSON object string - """ + meta: typing.Optional[ApiMeta] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/cohere/v2/types/v2rerank_response_results_item.py b/src/cohere/v2/types/v2rerank_response_results_item.py new file mode 100644 index 000000000..4a09afd99 --- /dev/null +++ b/src/cohere/v2/types/v2rerank_response_results_item.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.unchecked_base_model import UncheckedBaseModel +import typing +from .v2rerank_response_results_item_document import V2RerankResponseResultsItemDocument +import pydantic +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class V2RerankResponseResultsItem(UncheckedBaseModel): + document: typing.Optional[V2RerankResponseResultsItemDocument] = pydantic.Field(default=None) + """ + If `return_documents` is set as `false` this will return none, if `true` it will return the documents passed in + """ + + index: int = pydantic.Field() + """ + Corresponds to the index in the original list of documents to which the ranked document belongs. (i.e. 
if the first value in the `results` object has an `index` value of 3, it means in the list of documents passed in, the document at `index=3` had the highest relevance) + """ + + relevance_score: float = pydantic.Field() + """ + Relevance scores are normalized to be in the range `[0, 1]`. Scores close to `1` indicate a high relevance to the query, and scores closer to `0` indicate low relevance. It is not accurate to assume a score of 0.9 means the document is 2x more relevant than a document with a score of 0.45 + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/cohere/v2/types/tool2.py b/src/cohere/v2/types/v2rerank_response_results_item_document.py similarity index 66% rename from src/cohere/v2/types/tool2.py rename to src/cohere/v2/types/v2rerank_response_results_item_document.py index 9891642b8..e526cc60f 100644 --- a/src/cohere/v2/types/tool2.py +++ b/src/cohere/v2/types/v2rerank_response_results_item_document.py @@ -1,17 +1,19 @@ # This file was auto-generated by Fern from our API Definition. from ...core.unchecked_base_model import UncheckedBaseModel -import typing -from .tool2function import Tool2Function import pydantic from ...core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + +class V2RerankResponseResultsItemDocument(UncheckedBaseModel): + """ + If `return_documents` is set as `false` this will return none, if `true` it will return the documents passed in + """ -class Tool2(UncheckedBaseModel): - type: typing.Optional[typing.Literal["function"]] = None - function: typing.Optional[Tool2Function] = pydantic.Field(default=None) + text: str = pydantic.Field() """ - The function to be executed. 
+ The text of the document to rerank """ if IS_PYDANTIC_V2: diff --git a/tests/test_async_client.py b/tests/test_async_client.py index 63ecb086c..187d8d53b 100644 --- a/tests/test_async_client.py +++ b/tests/test_async_client.py @@ -3,7 +3,7 @@ import cohere from cohere import ChatConnector, ClassifyExample, CreateConnectorServiceAuth, Tool, \ - ToolParameterDefinitionsValue, ToolResult, Message_User, Message_Chatbot + ToolParameterDefinitionsValue, ToolResult, UserMessage, ChatbotMessage package_dir = os.path.dirname(os.path.abspath(__file__)) embed_job = os.path.join(package_dir, 'embed_job.jsonl') @@ -26,9 +26,9 @@ async def test_context_manager(self) -> None: async def test_chat(self) -> None: chat = await self.co.chat( chat_history=[ - Message_User( + UserMessage( message="Who discovered gravity?"), - Message_Chatbot(message="The man who is widely credited with discovering " + ChatbotMessage(message="The man who is widely credited with discovering " "gravity is Sir Isaac Newton") ], message="What year was he born?", @@ -40,9 +40,9 @@ async def test_chat(self) -> None: async def test_chat_stream(self) -> None: stream = self.co.chat_stream( chat_history=[ - Message_User( + UserMessage( message="Who discovered gravity?"), - Message_Chatbot(message="The man who is widely credited with discovering " + ChatbotMessage(message="The man who is widely credited with discovering " "gravity is Sir Isaac Newton") ], message="What year was he born?", diff --git a/tests/test_client.py b/tests/test_client.py index aaee3923e..9a839877e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -4,7 +4,7 @@ import cohere from cohere import ChatConnector, ClassifyExample, CreateConnectorServiceAuth, Tool, \ - ToolParameterDefinitionsValue, ToolResult, Message_Chatbot, Message_User, ResponseFormat_JsonObject + ToolParameterDefinitionsValue, ToolResult, ChatbotMessage, UserMessage, JsonObjectResponseFormat co = cohere.Client(timeout=10000) @@ -25,9 +25,9 @@ def 
test_context_manager(self) -> None: def test_chat(self) -> None: chat = co.chat( chat_history=[ - Message_User( + UserMessage( message="Who discovered gravity?"), - Message_Chatbot(message="The man who is widely credited with discovering " + ChatbotMessage(message="The man who is widely credited with discovering " "gravity is Sir Isaac Newton") ], message="What year was he born?", @@ -40,7 +40,7 @@ def test_chat(self) -> None: def test_response_format(self) -> None: chat = co.chat( message="imagine a character from the tv show severance", - response_format=ResponseFormat_JsonObject( + response_format=JsonObjectResponseFormat( schema={ "type": "object", "properties": { @@ -61,9 +61,9 @@ def test_response_format(self) -> None: def test_chat_stream(self) -> None: stream = co.chat_stream( chat_history=[ - Message_User( + UserMessage( message="Who discovered gravity?"), - Message_Chatbot(message="The man who is widely credited with discovering " + ChatbotMessage(message="The man who is widely credited with discovering " "gravity is Sir Isaac Newton") ], message="What year was he born?", diff --git a/tests/test_client_v2.py b/tests/test_client_v2.py index 06f654a43..235fc9436 100644 --- a/tests/test_client_v2.py +++ b/tests/test_client_v2.py @@ -3,7 +3,7 @@ import unittest import cohere -from cohere import ToolMessage2, UserMessage, AssistantMessage +from cohere import ToolMessage, UserMessage, AssistantMessage co = cohere.ClientV2(timeout=10000) @@ -14,12 +14,12 @@ class TestClientV2(unittest.TestCase): def test_chat(self) -> None: - response = co.chat(model="command-r-plus", messages=[cohere.v2.ChatMessage2_User(content="hello world!")]) + response = co.chat(model="command-r-plus", messages=[cohere.UserChatMessageV2(content="hello world!")]) print(response.message) def test_chat_stream(self) -> None: - stream = co.chat_stream(model="command-r-plus", messages=[cohere.v2.ChatMessage2_User(content="hello world!")]) + stream = co.chat_stream(model="command-r-plus", 
messages=[cohere.UserChatMessageV2(content="hello world!")]) events = set() @@ -43,8 +43,8 @@ def test_chat_documents(self) -> None: {"title": "widget sales 2021", "text": "4 million"}, ] response = co.chat( - messages=cohere.v2.UserMessage( - content=cohere.v2.TextContent(text="how many widges were sold in 2020?"), + messages=cohere.UserChatMessageV2( + content=cohere.TextContent(text="how many widges were sold in 2020?"), documents=documents, ), ) @@ -67,17 +67,17 @@ def test_chat_tools(self) -> None: "required": ["location"], }, } - tools = [cohere.v2.Tool2(type="function", function=get_weather_tool)] - messages: typing.List[typing.Union[UserMessage, AssistantMessage, None, ToolMessage2]] = [ - cohere.v2.UserMessage(content="what is the weather in Toronto?") + tools = [cohere.ToolV2(type="function", function=get_weather_tool)] + messages: cohere.ChatMessages = [ + cohere.UserChatMessageV2(content="what is the weather in Toronto?") ] res = co.chat(model="command-r-plus", tools=tools, messages=messages) # call the get_weather tool tool_result = {"temperature": "30C"} - tool_content = [cohere.v2.Content(output=tool_result, text="The weather in Toronto is 30C")] + tool_content = [cohere.Content(output=tool_result, text="The weather in Toronto is 30C")] messages.append(res.message) - messages.append(cohere.v2.ToolMessage2(tool_call_id=res.message.tool_calls[0].id, tool_content=tool_content)) + messages.append(cohere.ToolChatMessageV2(tool_call_id=res.message.tool_calls[0].id, tool_content=tool_content)) res = co.chat(tools=tools, messages=messages) print(res.message) diff --git a/tests/test_embed_utils.py b/tests/test_embed_utils.py index 17813658a..40c712177 100644 --- a/tests/test_embed_utils.py +++ b/tests/test_embed_utils.py @@ -1,10 +1,10 @@ import unittest -from cohere import EmbedResponse_EmbeddingsByType, EmbedByTypeResponseEmbeddings, ApiMeta, ApiMetaBilledUnits, \ - ApiMetaApiVersion, EmbedResponse_EmbeddingsFloats +from cohere import 
EmbeddingsByTypeEmbedResponse, EmbedByTypeResponseEmbeddings, ApiMeta, ApiMetaBilledUnits, \ + ApiMetaApiVersion, EmbeddingsFloatsEmbedResponse from cohere.utils import merge_embed_responses -ebt_1 = EmbedResponse_EmbeddingsByType( +ebt_1 = EmbeddingsByTypeEmbedResponse( response_type="embeddings_by_type", id="1", embeddings=EmbedByTypeResponseEmbeddings( @@ -27,7 +27,7 @@ ) ) -ebt_2 = EmbedResponse_EmbeddingsByType( +ebt_2 = EmbeddingsByTypeEmbedResponse( response_type="embeddings_by_type", id="2", embeddings=EmbedByTypeResponseEmbeddings( @@ -50,7 +50,7 @@ ) ) -ebt_partial_1 = EmbedResponse_EmbeddingsByType( +ebt_partial_1 = EmbeddingsByTypeEmbedResponse( response_type="embeddings_by_type", id="1", embeddings=EmbedByTypeResponseEmbeddings( @@ -71,7 +71,7 @@ ) ) -ebt_partial_2 = EmbedResponse_EmbeddingsByType( +ebt_partial_2 = EmbeddingsByTypeEmbedResponse( response_type="embeddings_by_type", id="2", embeddings=EmbedByTypeResponseEmbeddings( @@ -92,7 +92,7 @@ ) ) -ebf_1 = EmbedResponse_EmbeddingsFloats( +ebf_1 = EmbeddingsFloatsEmbedResponse( response_type="embeddings_floats", id="1", texts=["hello", "goodbye"], @@ -109,7 +109,7 @@ ) ) -ebf_2 = EmbedResponse_EmbeddingsFloats( +ebf_2 = EmbeddingsFloatsEmbedResponse( response_type="embeddings_floats", id="2", texts=["bye", "seeya"], @@ -139,7 +139,7 @@ def test_merge_embeddings_by_type(self) -> None: raise Exception("this is just for mpy") self.assertEqual(set(resp.meta.warnings or []), {"test_warning_1", "test_warning_2"}) - self.assertEqual(resp, EmbedResponse_EmbeddingsByType( + self.assertEqual(resp, EmbeddingsByTypeEmbedResponse( response_type="embeddings_by_type", id="1, 2", embeddings=EmbedByTypeResponseEmbeddings( @@ -172,7 +172,7 @@ def test_merge_embeddings_floats(self) -> None: raise Exception("this is just for mpy") self.assertEqual(set(resp.meta.warnings or []), {"test_warning_1", "test_warning_2"}) - self.assertEqual(resp, EmbedResponse_EmbeddingsFloats( + self.assertEqual(resp, 
EmbeddingsFloatsEmbedResponse( response_type="embeddings_floats", id="1, 2", texts=["hello", "goodbye", "bye", "seeya"], @@ -199,7 +199,7 @@ def test_merge_partial_embeddings_floats(self) -> None: raise Exception("this is just for mpy") self.assertEqual(set(resp.meta.warnings or []), {"test_warning_1", "test_warning_2"}) - self.assertEqual(resp, EmbedResponse_EmbeddingsByType( + self.assertEqual(resp, EmbeddingsByTypeEmbedResponse( response_type="embeddings_by_type", id="1, 2", embeddings=EmbedByTypeResponseEmbeddings(