From 53276043110f3062f595315fa294a887b10de118 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Fri, 16 Feb 2024 14:41:42 +0000
Subject: [PATCH] SDK regeneration

---
 .github/workflows/ci.yml                   |   6 +-
 poetry.lock                                | 363 +++------
 pyproject.toml                             |   7 +-
 src/cohere/client.py                       | 735 +++++++++++++-----
 src/cohere/core/__init__.py                |   2 +
 src/cohere/core/client_wrapper.py          |   2 +-
 src/cohere/core/request_options.py         |  29 +
 src/cohere/environment.py                  |   2 +-
 src/cohere/resources/connectors/client.py  | 421 +++++++---
 src/cohere/resources/datasets/client.py    | 355 ++++++---
 src/cohere/resources/embed_jobs/client.py  | 238 ++++--
 src/cohere/types/embed_response.py         |   6 +-
 .../types/generate_streamed_response.py    |   8 +-
 .../types/non_streamed_chat_response.py    |   5 +-
 src/cohere/types/streamed_chat_response.py |  14 +-
 15 files changed, 1501 insertions(+), 692 deletions(-)
 create mode 100644 src/cohere/core/request_options.py

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0a5b56ed8..0b26bf2a3 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
       - name: Set up python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.7
+          python-version: 3.8
       - name: Bootstrap poetry
         run: |
           curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
@@ -26,7 +26,7 @@
       - name: Set up python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.7
+          python-version: 3.8
       - name: Bootstrap poetry
         run: |
           curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
@@ -45,7 +45,7 @@
       - name: Set up python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.7
+          python-version: 3.8
       - name: Bootstrap poetry
         run: |
           curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
diff --git a/poetry.lock b/poetry.lock
index c23d38665..a8a1c18df 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2,13 +2,13 @@
 
 [[package]]
 name = "annotated-types"
-version = "0.5.0"
+version = "0.6.0"
 description = "Reusable constraint types to use with typing.Annotated"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"},
-    {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"},
+    {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"},
+    {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"},
 ]
 
 [package.dependencies]
 typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
 
 [[package]]
 name = "anyio"
-version = "3.7.1"
+version = "4.2.0"
 description = "High level compatibility layer for multiple asynchronous event loop implementations"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"},
-    {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"},
+    {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"},
+    {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"},
 ]
 
 [package.dependencies]
-exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
+exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
 idna = ">=2.8"
 sniffio = ">=1.1"
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
+typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
 
 [package.extras]
-doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"]
-test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
-trio = ["trio (<0.22)"]
+doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (>=0.23)"]
 
 [[package]]
 name = "certifi"
@@ -83,44 +83,42 @@ files = [
     {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
 ]
 
-[package.dependencies]
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
-
 [[package]]
 name = "httpcore"
-version = "0.17.3"
+version = "1.0.3"
 description = "A minimal low-level HTTP client."
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "httpcore-0.17.3-py3-none-any.whl", hash = "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"},
-    {file = "httpcore-0.17.3.tar.gz", hash = "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888"},
+    {file = "httpcore-1.0.3-py3-none-any.whl", hash = "sha256:9a6a501c3099307d9fd76ac244e08503427679b1e81ceb1d922485e2f2462ad2"},
+    {file = "httpcore-1.0.3.tar.gz", hash = "sha256:5c0f9546ad17dac4d0772b0808856eb616eb8b48ce94f49ed819fd6982a8a544"},
 ]
 
 [package.dependencies]
-anyio = ">=3.0,<5.0"
 certifi = "*"
 h11 = ">=0.13,<0.15"
-sniffio = "==1.*"
 
 [package.extras]
+asyncio = ["anyio (>=4.0,<5.0)"]
 http2 = ["h2 (>=3,<5)"]
 socks = ["socksio (==1.*)"]
+trio = ["trio (>=0.22.0,<0.24.0)"]
 
 [[package]]
 name = "httpx"
-version = "0.24.1"
+version = "0.26.0"
 description = "The next generation HTTP client."
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"},
-    {file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"},
+    {file = "httpx-0.26.0-py3-none-any.whl", hash = "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd"},
+    {file = "httpx-0.26.0.tar.gz", hash = "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf"},
 ]
 
 [package.dependencies]
+anyio = "*"
 certifi = "*"
-httpcore = ">=0.15.0,<0.18.0"
+httpcore = "==1.*"
 idna = "*"
 sniffio = "*"
@@ -141,26 +139,6 @@ files = [
     {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"},
 ]
 
-[[package]]
-name = "importlib-metadata"
-version = "6.7.0"
-description = "Read metadata from Python packages"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"},
-    {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"},
-]
-
-[package.dependencies]
-typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
-zipp = ">=0.5"
-
-[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-perf = ["ipython"]
-testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
-
 [[package]]
 name = "iniconfig"
 version = "2.0.0"
@@ -207,7 +185,6 @@ files = [
 
 [package.dependencies]
 mypy-extensions = ">=0.4.3"
 tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
 typing-extensions = ">=3.10"
 
 [package.extras]
@@ -239,36 +216,33 @@ files = [
 
 [[package]]
 name = "pluggy"
-version = "1.2.0"
+version = "1.4.0"
 description = "plugin and hook calling mechanisms for python"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "pluggy-1.2.0-py3-none-any.whl", hash = "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849"},
-    {file = "pluggy-1.2.0.tar.gz", hash = "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"},
+    {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"},
+    {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"},
 ]
 
-[package.dependencies]
-importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
-
 [package.extras]
 dev = ["pre-commit", "tox"]
 testing = ["pytest", "pytest-benchmark"]
 
 [[package]]
 name = "pydantic"
-version = "2.4.2"
+version = "2.6.1"
 description = "Data validation using Python type hints"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "pydantic-2.4.2-py3-none-any.whl", hash = "sha256:bc3ddf669d234f4220e6e1c4d96b061abe0998185a8d7855c0126782b7abc8c1"},
-    {file = "pydantic-2.4.2.tar.gz", hash = "sha256:94f336138093a5d7f426aac732dcfe7ab4eb4da243c88f891d65deb4a2556ee7"},
+    {file = "pydantic-2.6.1-py3-none-any.whl", hash = "sha256:0b6a909df3192245cb736509a92ff69e4fef76116feffec68e93a567347bae6f"},
+    {file = "pydantic-2.6.1.tar.gz", hash = "sha256:4fd5c182a2488dc63e6d32737ff19937888001e2a6d86e94b3f233104a5d1fa9"},
 ]
 
 [package.dependencies]
 annotated-types = ">=0.4.0"
-pydantic-core = "2.10.1"
+pydantic-core = "2.16.2"
 typing-extensions = ">=4.6.1"
 
 [package.extras]
@@ -276,117 +250,90 @@ email = ["email-validator (>=2.0.0)"]
 
 [[package]]
 name = "pydantic-core"
-version = "2.10.1"
+version = "2.16.2"
 description = ""
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "pydantic_core-2.10.1-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:d64728ee14e667ba27c66314b7d880b8eeb050e58ffc5fec3b7a109f8cddbd63"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:48525933fea744a3e7464c19bfede85df4aba79ce90c60b94d8b6e1eddd67096"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef337945bbd76cce390d1b2496ccf9f90b1c1242a3a7bc242ca4a9fc5993427a"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1392e0638af203cee360495fd2cfdd6054711f2db5175b6e9c3c461b76f5175"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0675ba5d22de54d07bccde38997e780044dcfa9a71aac9fd7d4d7a1d2e3e65f7"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:128552af70a64660f21cb0eb4876cbdadf1a1f9d5de820fed6421fa8de07c893"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f6e6aed5818c264412ac0598b581a002a9f050cb2637a84979859e70197aa9e"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ecaac27da855b8d73f92123e5f03612b04c5632fd0a476e469dfc47cd37d6b2e"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b3c01c2fb081fced3bbb3da78510693dc7121bb893a1f0f5f4b48013201f362e"},
-    {file = "pydantic_core-2.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:92f675fefa977625105708492850bcbc1182bfc3e997f8eecb866d1927c98ae6"},
-    {file = "pydantic_core-2.10.1-cp310-none-win32.whl", hash = "sha256:420a692b547736a8d8703c39ea935ab5d8f0d2573f8f123b0a294e49a73f214b"},
-    {file = "pydantic_core-2.10.1-cp310-none-win_amd64.whl", hash = "sha256:0880e239827b4b5b3e2ce05e6b766a7414e5f5aedc4523be6b68cfbc7f61c5d0"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:073d4a470b195d2b2245d0343569aac7e979d3a0dcce6c7d2af6d8a920ad0bea"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:600d04a7b342363058b9190d4e929a8e2e715c5682a70cc37d5ded1e0dd370b4"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39215d809470f4c8d1881758575b2abfb80174a9e8daf8f33b1d4379357e417c"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eeb3d3d6b399ffe55f9a04e09e635554012f1980696d6b0aca3e6cf42a17a03b"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a7902bf75779bc12ccfc508bfb7a4c47063f748ea3de87135d433a4cca7a2f"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3625578b6010c65964d177626fde80cf60d7f2e297d56b925cb5cdeda6e9925a"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa48fc31fc7243e50188197b5f0c4228956f97b954f76da157aae7f67269ae8"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:07ec6d7d929ae9c68f716195ce15e745b3e8fa122fc67698ac6498d802ed0fa4"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6f31a17acede6a8cd1ae2d123ce04d8cca74056c9d456075f4f6f85de055607"},
-    {file = "pydantic_core-2.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8f1ebca515a03e5654f88411420fea6380fc841d1bea08effb28184e3d4899f"},
-    {file = "pydantic_core-2.10.1-cp311-none-win32.whl", hash = "sha256:6db2eb9654a85ada248afa5a6db5ff1cf0f7b16043a6b070adc4a5be68c716d6"},
-    {file = "pydantic_core-2.10.1-cp311-none-win_amd64.whl", hash = "sha256:4a5be350f922430997f240d25f8219f93b0c81e15f7b30b868b2fddfc2d05f27"},
-    {file = "pydantic_core-2.10.1-cp311-none-win_arm64.whl", hash = "sha256:5fdb39f67c779b183b0c853cd6b45f7db84b84e0571b3ef1c89cdb1dfc367325"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:b1f22a9ab44de5f082216270552aa54259db20189e68fc12484873d926426921"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8572cadbf4cfa95fb4187775b5ade2eaa93511f07947b38f4cd67cf10783b118"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db9a28c063c7c00844ae42a80203eb6d2d6bbb97070cfa00194dff40e6f545ab"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e2a35baa428181cb2270a15864ec6286822d3576f2ed0f4cd7f0c1708472aff"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05560ab976012bf40f25d5225a58bfa649bb897b87192a36c6fef1ab132540d7"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6495008733c7521a89422d7a68efa0a0122c99a5861f06020ef5b1f51f9ba7c"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ac492c686defc8e6133e3a2d9eaf5261b3df26b8ae97450c1647286750b901"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8282bab177a9a3081fd3d0a0175a07a1e2bfb7fcbbd949519ea0980f8a07144d"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:aafdb89fdeb5fe165043896817eccd6434aee124d5ee9b354f92cd574ba5e78f"},
-    {file = "pydantic_core-2.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f6defd966ca3b187ec6c366604e9296f585021d922e666b99c47e78738b5666c"},
-    {file = "pydantic_core-2.10.1-cp312-none-win32.whl", hash = "sha256:7c4d1894fe112b0864c1fa75dffa045720a194b227bed12f4be7f6045b25209f"},
-    {file = "pydantic_core-2.10.1-cp312-none-win_amd64.whl", hash = "sha256:5994985da903d0b8a08e4935c46ed8daf5be1cf217489e673910951dc533d430"},
-    {file = "pydantic_core-2.10.1-cp312-none-win_arm64.whl", hash = "sha256:0d8a8adef23d86d8eceed3e32e9cca8879c7481c183f84ed1a8edc7df073af94"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:9badf8d45171d92387410b04639d73811b785b5161ecadabf056ea14d62d4ede"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:ebedb45b9feb7258fac0a268a3f6bec0a2ea4d9558f3d6f813f02ff3a6dc6698"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfe1090245c078720d250d19cb05d67e21a9cd7c257698ef139bc41cf6c27b4f"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e357571bb0efd65fd55f18db0a2fb0ed89d0bb1d41d906b138f088933ae618bb"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b3dcd587b69bbf54fc04ca157c2323b8911033e827fffaecf0cafa5a892a0904"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c120c9ce3b163b985a3b966bb701114beb1da4b0468b9b236fc754783d85aa3"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15d6bca84ffc966cc9976b09a18cf9543ed4d4ecbd97e7086f9ce9327ea48891"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cabb9710f09d5d2e9e2748c3e3e20d991a4c5f96ed8f1132518f54ab2967221"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:82f55187a5bebae7d81d35b1e9aaea5e169d44819789837cdd4720d768c55d15"},
-    {file = "pydantic_core-2.10.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1d40f55222b233e98e3921df7811c27567f0e1a4411b93d4c5c0f4ce131bc42f"},
-    {file = "pydantic_core-2.10.1-cp37-none-win32.whl", hash = "sha256:14e09ff0b8fe6e46b93d36a878f6e4a3a98ba5303c76bb8e716f4878a3bee92c"},
-    {file = "pydantic_core-2.10.1-cp37-none-win_amd64.whl", hash = "sha256:1396e81b83516b9d5c9e26a924fa69164156c148c717131f54f586485ac3c15e"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:6835451b57c1b467b95ffb03a38bb75b52fb4dc2762bb1d9dbed8de31ea7d0fc"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b00bc4619f60c853556b35f83731bd817f989cba3e97dc792bb8c97941b8053a"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fa467fd300a6f046bdb248d40cd015b21b7576c168a6bb20aa22e595c8ffcdd"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d99277877daf2efe074eae6338453a4ed54a2d93fb4678ddfe1209a0c93a2468"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aad7bd686363d1ce4ee930ad39f14e1673248373f4a9d74d2b9554f06199fb58"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:443fed67d33aa85357464f297e3d26e570267d1af6fef1c21ca50921d2976302"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:042462d8d6ba707fd3ce9649e7bf268633a41018d6a998fb5fbacb7e928a183e"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ecdbde46235f3d560b18be0cb706c8e8ad1b965e5c13bbba7450c86064e96561"},
-    {file = "pydantic_core-2.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ed550ed05540c03f0e69e6d74ad58d026de61b9eaebebbaaf8873e585cbb18de"},
-    {file = "pydantic_core-2.10.1-cp38-none-win32.whl", hash = "sha256:8cdbbd92154db2fec4ec973d45c565e767ddc20aa6dbaf50142676484cbff8ee"},
-    {file = "pydantic_core-2.10.1-cp38-none-win_amd64.whl", hash = "sha256:9f6f3e2598604956480f6c8aa24a3384dbf6509fe995d97f6ca6103bb8c2534e"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:655f8f4c8d6a5963c9a0687793da37b9b681d9ad06f29438a3b2326d4e6b7970"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e570ffeb2170e116a5b17e83f19911020ac79d19c96f320cbfa1fa96b470185b"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64322bfa13e44c6c30c518729ef08fda6026b96d5c0be724b3c4ae4da939f875"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485a91abe3a07c3a8d1e082ba29254eea3e2bb13cbbd4351ea4e5a21912cc9b0"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7c2b8eb9fc872e68b46eeaf835e86bccc3a58ba57d0eedc109cbb14177be531"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5cb87bdc2e5f620693148b5f8f842d293cae46c5f15a1b1bf7ceeed324a740c"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25bd966103890ccfa028841a8f30cebcf5875eeac8c4bde4fe221364c92f0c9a"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f323306d0556351735b54acbf82904fe30a27b6a7147153cbe6e19aaaa2aa429"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0c27f38dc4fbf07b358b2bc90edf35e82d1703e22ff2efa4af4ad5de1b3833e7"},
-    {file = "pydantic_core-2.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f1365e032a477c1430cfe0cf2856679529a2331426f8081172c4a74186f1d595"},
-    {file = "pydantic_core-2.10.1-cp39-none-win32.whl", hash = "sha256:a1c311fd06ab3b10805abb72109f01a134019739bd3286b8ae1bc2fc4e50c07a"},
-    {file = "pydantic_core-2.10.1-cp39-none-win_amd64.whl", hash = "sha256:ae8a8843b11dc0b03b57b52793e391f0122e740de3df1474814c700d2622950a"},
-    {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d43002441932f9a9ea5d6f9efaa2e21458221a3a4b417a14027a1d530201ef1b"},
-    {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521"},
-    {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:962ed72424bf1f72334e2f1e61b68f16c0e596f024ca7ac5daf229f7c26e4208"},
-    {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2cf5bb4dd67f20f3bbc1209ef572a259027c49e5ff694fa56bed62959b41e1f9"},
-    {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e544246b859f17373bed915182ab841b80849ed9cf23f1f07b73b7c58baee5fb"},
-    {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c0877239307b7e69d025b73774e88e86ce82f6ba6adf98f41069d5b0b78bd1bf"},
-    {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:53df009d1e1ba40f696f8995683e067e3967101d4bb4ea6f667931b7d4a01357"},
-    {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a1254357f7e4c82e77c348dabf2d55f1d14d19d91ff025004775e70a6ef40ada"},
-    {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:524ff0ca3baea164d6d93a32c58ac79eca9f6cf713586fdc0adb66a8cdeab96a"},
-    {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f0ac9fb8608dbc6eaf17956bf623c9119b4db7dbb511650910a82e261e6600f"},
-    {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:320f14bd4542a04ab23747ff2c8a778bde727158b606e2661349557f0770711e"},
-    {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63974d168b6233b4ed6a0046296803cb13c56637a7b8106564ab575926572a55"},
-    {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:417243bf599ba1f1fef2bb8c543ceb918676954734e2dcb82bf162ae9d7bd514"},
-    {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dda81e5ec82485155a19d9624cfcca9be88a405e2857354e5b089c2a982144b2"},
-    {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:14cfbb00959259e15d684505263d5a21732b31248a5dd4941f73a3be233865b9"},
-    {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:631cb7415225954fdcc2a024119101946793e5923f6c4d73a5914d27eb3d3a05"},
-    {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:bec7dd208a4182e99c5b6c501ce0b1f49de2802448d4056091f8e630b28e9a52"},
-    {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:149b8a07712f45b332faee1a2258d8ef1fb4a36f88c0c17cb687f205c5dc6e7d"},
-    {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d966c47f9dd73c2d32a809d2be529112d509321c5310ebf54076812e6ecd884"},
-    {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7eb037106f5c6b3b0b864ad226b0b7ab58157124161d48e4b30c4a43fef8bc4b"},
-    {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:154ea7c52e32dce13065dbb20a4a6f0cc012b4f667ac90d648d36b12007fa9f7"},
-    {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e562617a45b5a9da5be4abe72b971d4f00bf8555eb29bb91ec2ef2be348cd132"},
-    {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:f23b55eb5464468f9e0e9a9935ce3ed2a870608d5f534025cd5536bca25b1402"},
-    {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:e9121b4009339b0f751955baf4543a0bfd6bc3f8188f8056b1a25a2d45099934"},
-    {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0523aeb76e03f753b58be33b26540880bac5aa54422e4462404c432230543f33"},
-    {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0e2959ef5d5b8dc9ef21e1a305a21a36e254e6a34432d00c72a92fdc5ecda5"},
-    {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da01bec0a26befab4898ed83b362993c844b9a607a86add78604186297eb047e"},
-    {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f2e9072d71c1f6cfc79a36d4484c82823c560e6f5599c43c1ca6b5cdbd54f881"},
-    {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f36a3489d9e28fe4b67be9992a23029c3cec0babc3bd9afb39f49844a8c721c5"},
-    {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f64f82cc3443149292b32387086d02a6c7fb39b8781563e0ca7b8d7d9cf72bd7"},
-    {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b4a6db486ac8e99ae696e09efc8b2b9fea67b63c8f88ba7a1a16c24a057a0776"},
-    {file = "pydantic_core-2.10.1.tar.gz", hash = "sha256:0f8682dbdd2f67f8e1edddcbffcc29f60a6182b4901c367fc8c1c40d30bb0a82"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3fab4e75b8c525a4776e7630b9ee48aea50107fea6ca9f593c98da3f4d11bf7c"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8bde5b48c65b8e807409e6f20baee5d2cd880e0fad00b1a811ebc43e39a00ab2"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2924b89b16420712e9bb8192396026a8fbd6d8726224f918353ac19c4c043d2a"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:16aa02e7a0f539098e215fc193c8926c897175d64c7926d00a36188917717a05"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:936a787f83db1f2115ee829dd615c4f684ee48ac4de5779ab4300994d8af325b"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:459d6be6134ce3b38e0ef76f8a672924460c455d45f1ad8fdade36796df1ddc8"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9ee4febb249c591d07b2d4dd36ebcad0ccd128962aaa1801508320896575ef"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40a0bd0bed96dae5712dab2aba7d334a6c67cbcac2ddfca7dbcc4a8176445990"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:870dbfa94de9b8866b37b867a2cb37a60c401d9deb4a9ea392abf11a1f98037b"},
+    {file = "pydantic_core-2.16.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:308974fdf98046db28440eb3377abba274808bf66262e042c412eb2adf852731"},
+    {file = "pydantic_core-2.16.2-cp310-none-win32.whl", hash = "sha256:a477932664d9611d7a0816cc3c0eb1f8856f8a42435488280dfbf4395e141485"},
+    {file = "pydantic_core-2.16.2-cp310-none-win_amd64.whl", hash = "sha256:8f9142a6ed83d90c94a3efd7af8873bf7cefed2d3d44387bf848888482e2d25f"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:406fac1d09edc613020ce9cf3f2ccf1a1b2f57ab00552b4c18e3d5276c67eb11"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ce232a6170dd6532096cadbf6185271e4e8c70fc9217ebe105923ac105da9978"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a90fec23b4b05a09ad988e7a4f4e081711a90eb2a55b9c984d8b74597599180f"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8aafeedb6597a163a9c9727d8a8bd363a93277701b7bfd2749fbefee2396469e"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9957433c3a1b67bdd4c63717eaf174ebb749510d5ea612cd4e83f2d9142f3fc8"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0d7a9165167269758145756db43a133608a531b1e5bb6a626b9ee24bc38a8f7"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dffaf740fe2e147fedcb6b561353a16243e654f7fe8e701b1b9db148242e1272"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ed79883b4328b7f0bd142733d99c8e6b22703e908ec63d930b06be3a0e7113"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:cf903310a34e14651c9de056fcc12ce090560864d5a2bb0174b971685684e1d8"},
+    {file = "pydantic_core-2.16.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46b0d5520dbcafea9a8645a8164658777686c5c524d381d983317d29687cce97"},
+    {file = "pydantic_core-2.16.2-cp311-none-win32.whl", hash = "sha256:70651ff6e663428cea902dac297066d5c6e5423fda345a4ca62430575364d62b"},
+    {file = "pydantic_core-2.16.2-cp311-none-win_amd64.whl", hash = "sha256:98dc6f4f2095fc7ad277782a7c2c88296badcad92316b5a6e530930b1d475ebc"},
+    {file = "pydantic_core-2.16.2-cp311-none-win_arm64.whl", hash = "sha256:ef6113cd31411eaf9b39fc5a8848e71c72656fd418882488598758b2c8c6dfa0"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:88646cae28eb1dd5cd1e09605680c2b043b64d7481cdad7f5003ebef401a3039"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7b883af50eaa6bb3299780651e5be921e88050ccf00e3e583b1e92020333304b"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bf26c2e2ea59d32807081ad51968133af3025c4ba5753e6a794683d2c91bf6e"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99af961d72ac731aae2a1b55ccbdae0733d816f8bfb97b41909e143de735f522"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02906e7306cb8c5901a1feb61f9ab5e5c690dbbeaa04d84c1b9ae2a01ebe9379"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5362d099c244a2d2f9659fb3c9db7c735f0004765bbe06b99be69fbd87c3f15"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ac426704840877a285d03a445e162eb258924f014e2f074e209d9b4ff7bf380"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b94cbda27267423411c928208e89adddf2ea5dd5f74b9528513f0358bba019cb"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6db58c22ac6c81aeac33912fb1af0e930bc9774166cdd56eade913d5f2fff35e"},
+    {file = "pydantic_core-2.16.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:396fdf88b1b503c9c59c84a08b6833ec0c3b5ad1a83230252a9e17b7dfb4cffc"},
+    {file = "pydantic_core-2.16.2-cp312-none-win32.whl", hash = "sha256:7c31669e0c8cc68400ef0c730c3a1e11317ba76b892deeefaf52dcb41d56ed5d"},
+    {file = "pydantic_core-2.16.2-cp312-none-win_amd64.whl", hash = "sha256:a3b7352b48fbc8b446b75f3069124e87f599d25afb8baa96a550256c031bb890"},
+    {file = "pydantic_core-2.16.2-cp312-none-win_arm64.whl", hash = "sha256:a9e523474998fb33f7c1a4d55f5504c908d57add624599e095c20fa575b8d943"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ae34418b6b389d601b31153b84dce480351a352e0bb763684a1b993d6be30f17"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:732bd062c9e5d9582a30e8751461c1917dd1ccbdd6cafb032f02c86b20d2e7ec"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b52776a2e3230f4854907a1e0946eec04d41b1fc64069ee774876bbe0eab55"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef551c053692b1e39e3f7950ce2296536728871110e7d75c4e7753fb30ca87f4"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ebb892ed8599b23fa8f1799e13a12c87a97a6c9d0f497525ce9858564c4575a4"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa6c8c582036275997a733427b88031a32ffa5dfc3124dc25a730658c47a572f"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ba0884a91f1aecce75202473ab138724aa4fb26d7707f2e1fa6c3e68c84fbf"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7924e54f7ce5d253d6160090ddc6df25ed2feea25bfb3339b424a9dd591688bc"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69a7b96b59322a81c2203be537957313b07dd333105b73db0b69212c7d867b4b"},
+    {file = "pydantic_core-2.16.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7e6231aa5bdacda78e96ad7b07d0c312f34ba35d717115f4b4bff6cb87224f0f"},
+    {file = "pydantic_core-2.16.2-cp38-none-win32.whl", hash = "sha256:41dac3b9fce187a25c6253ec79a3f9e2a7e761eb08690e90415069ea4a68ff7a"},
+    {file = "pydantic_core-2.16.2-cp38-none-win_amd64.whl", hash = "sha256:f685dbc1fdadb1dcd5b5e51e0a378d4685a891b2ddaf8e2bba89bd3a7144e44a"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:55749f745ebf154c0d63d46c8c58594d8894b161928aa41adbb0709c1fe78b77"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b30b0dd58a4509c3bd7eefddf6338565c4905406aee0c6e4a5293841411a1286"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18de31781cdc7e7b28678df7c2d7882f9692ad060bc6ee3c94eb15a5d733f8f7"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5864b0242f74b9dd0b78fd39db1768bc3f00d1ffc14e596fd3e3f2ce43436a33"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8f9186ca45aee030dc8234118b9c0784ad91a0bb27fc4e7d9d6608a5e3d386c"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc6f6c9be0ab6da37bc77c2dda5f14b1d532d5dbef00311ee6e13357a418e646"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa057095f621dad24a1e906747179a69780ef45cc8f69e97463692adbcdae878"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ad84731a26bcfb299f9eab56c7932d46f9cad51c52768cace09e92a19e4cf55"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3b052c753c4babf2d1edc034c97851f867c87d6f3ea63a12e2700f159f5c41c3"},
+    {file = "pydantic_core-2.16.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e0f686549e32ccdb02ae6f25eee40cc33900910085de6aa3790effd391ae10c2"},
+    {file = "pydantic_core-2.16.2-cp39-none-win32.whl", hash = "sha256:7afb844041e707ac9ad9acad2188a90bffce2c770e6dc2318be0c9916aef1469"},
+    {file = "pydantic_core-2.16.2-cp39-none-win_amd64.whl", hash = "sha256:9da90d393a8227d717c19f5397688a38635afec89f2e2d7af0df037f3249c39a"},
+    {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5f60f920691a620b03082692c378661947d09415743e437a7478c309eb0e4f82"},
+    {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:47924039e785a04d4a4fa49455e51b4eb3422d6eaacfde9fc9abf8fdef164e8a"},
+    {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6294e76b0380bb7a61eb8a39273c40b20beb35e8c87ee101062834ced19c545"},
+    {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe56851c3f1d6f5384b3051c536cc81b3a93a73faf931f404fef95217cf1e10d"},
+    {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9d776d30cde7e541b8180103c3f294ef7c1862fd45d81738d156d00551005784"},
+    {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:72f7919af5de5ecfaf1eba47bf9a5d8aa089a3340277276e5636d16ee97614d7"},
+    {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:4bfcbde6e06c56b30668a0c872d75a7ef3025dc3c1823a13cf29a0e9b33f67e8"},
+    {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ff7c97eb7a29aba230389a2661edf2e9e06ce616c7e35aa764879b6894a44b25"},
+    {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9b5f13857da99325dcabe1cc4e9e6a3d7b2e2c726248ba5dd4be3e8e4a0b6d0e"},
+    {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a7e41e3ada4cca5f22b478c08e973c930e5e6c7ba3588fb8e35f2398cdcc1545"},
+    {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60eb8ceaa40a41540b9acae6ae7c1f0a67d233c40dc4359c256ad2ad85bdf5e5"},
+    {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7beec26729d496a12fd23cf8da9944ee338c8b8a17035a560b585c36fe81af20"},
+    {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22c5f022799f3cd6741e24f0443ead92ef42be93ffda0d29b2597208c94c3753"},
+    {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:eca58e319f4fd6df004762419612122b2c7e7d95ffafc37e890252f869f3fb2a"},
+    {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed957db4c33bc99895f3a1672eca7e80e8cda8bd1e29a80536b4ec2153fa9804"},
+    {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:459c0d338cc55d099798618f714b21b7ece17eb1a87879f2da20a3ff4c7628e2"},
+    {file = "pydantic_core-2.16.2.tar.gz", hash = "sha256:0ba503850d8b8dcc18391f10de896ae51d37fe5fe43dbfb6a35c5c5cad271a06"},
 ]
 
 [package.dependencies]
@@ -406,7 +353,6 @@ files = [
 
 [package.dependencies]
 colorama = {version = "*", markers = "sys_platform == \"win32\""}
 exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
-importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
 iniconfig = "*"
 packaging = "*"
 pluggy = ">=0.12,<2.0"
@@ -437,83 +383,18 @@ files = [
     {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
 ]
 
-[[package]]
-name = "typed-ast"
-version = "1.5.5"
-description = "a fork of Python 2 and 3 ast modules with type comment support"
-optional = false
-python-versions = ">=3.6"
-files = [
-    {file = "typed_ast-1.5.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4bc1efe0ce3ffb74784e06460f01a223ac1f6ab31c6bc0376a21184bf5aabe3b"},
-    {file = "typed_ast-1.5.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f7a8c46a8b333f71abd61d7ab9255440d4a588f34a21f126bbfc95f6049e686"},
-    {file = "typed_ast-1.5.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:597fc66b4162f959ee6a96b978c0435bd63791e31e4f410622d19f1686d5e769"},
-    {file = "typed_ast-1.5.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d41b7a686ce653e06c2609075d397ebd5b969d821b9797d029fccd71fdec8e04"},
-    {file = "typed_ast-1.5.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5fe83a9a44c4ce67c796a1b466c270c1272e176603d5e06f6afbc101a572859d"},
-    {file = "typed_ast-1.5.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d5c0c112a74c0e5db2c75882a0adf3133adedcdbfd8cf7c9d6ed77365ab90a1d"},
-    {file = "typed_ast-1.5.5-cp310-cp310-win_amd64.whl", hash = "sha256:e1a976ed4cc2d71bb073e1b2a250892a6e968ff02aa14c1f40eba4f365ffec02"},
-    {file = "typed_ast-1.5.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c631da9710271cb67b08bd3f3813b7af7f4c69c319b75475436fcab8c3d21bee"},
-    {file = "typed_ast-1.5.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b445c2abfecab89a932b20bd8261488d574591173d07827c1eda32c457358b18"},
-    {file = "typed_ast-1.5.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc95ffaaab2be3b25eb938779e43f513e0e538a84dd14a5d844b8f2932593d88"},
-    {file = "typed_ast-1.5.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61443214d9b4c660dcf4b5307f15c12cb30bdfe9588ce6158f4a005baeb167b2"},
-    {file = "typed_ast-1.5.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6eb936d107e4d474940469e8ec5b380c9b329b5f08b78282d46baeebd3692dc9"},
-    {file = "typed_ast-1.5.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e48bf27022897577d8479eaed64701ecaf0467182448bd95759883300ca818c8"},
-    {file = "typed_ast-1.5.5-cp311-cp311-win_amd64.whl", hash = "sha256:83509f9324011c9a39faaef0922c6f720f9623afe3fe220b6d0b15638247206b"},
-    {file = "typed_ast-1.5.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:44f214394fc1af23ca6d4e9e744804d890045d1643dd7e8229951e0ef39429b5"},
-    {file = "typed_ast-1.5.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:118c1ce46ce58fda78503eae14b7664163aa735b620b64b5b725453696f2a35c"},
-    {file = "typed_ast-1.5.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be4919b808efa61101456e87f2d4c75b228f4e52618621c77f1ddcaae15904fa"},
-    {file = "typed_ast-1.5.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fc2b8c4e1bc5cd96c1a823a885e6b158f8451cf6f5530e1829390b4d27d0807f"},
-    {file = "typed_ast-1.5.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:16f7313e0a08c7de57f2998c85e2a69a642e97cb32f87eb65fbfe88381a5e44d"},
-    {file = "typed_ast-1.5.5-cp36-cp36m-win_amd64.whl", hash = "sha256:2b946ef8c04f77230489f75b4b5a4a6f24c078be4aed241cfabe9cbf4156e7e5"},
-    {file = "typed_ast-1.5.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2188bc33d85951ea4ddad55d2b35598b2709d122c11c75cffd529fbc9965508e"},
-    {file = "typed_ast-1.5.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0635900d16ae133cab3b26c607586131269f88266954eb04ec31535c9a12ef1e"},
-    {file = "typed_ast-1.5.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57bfc3cf35a0f2fdf0a88a3044aafaec1d2f24d8ae8cd87c4f58d615fb5b6311"},
-    {file = "typed_ast-1.5.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2"},
-    {file = "typed_ast-1.5.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d09d930c2d1d621f717bb217bf1fe2584616febb5138d9b3e8cdd26506c3f6d4"},
-    {file = "typed_ast-1.5.5-cp37-cp37m-win_amd64.whl", hash = "sha256:d40c10326893ecab8a80a53039164a224984339b2c32a6baf55ecbd5b1df6431"},
-    {file = "typed_ast-1.5.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a"},
-    {file = "typed_ast-1.5.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ed4a1a42df8a3dfb6b40c3d2de109e935949f2f66b19703eafade03173f8f437"},
-    {file = "typed_ast-1.5.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:045f9930a1550d9352464e5149710d56a2aed23a2ffe78946478f7b5416f1ede"},
-    {file = "typed_ast-1.5.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:381eed9c95484ceef5ced626355fdc0765ab51d8553fec08661dce654a935db4"},
-    {file = "typed_ast-1.5.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bfd39a41c0ef6f31684daff53befddae608f9daf6957140228a08e51f312d7e6"},
-    {file = "typed_ast-1.5.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8c524eb3024edcc04e288db9541fe1f438f82d281e591c548903d5b77ad1ddd4"},
-    {file = "typed_ast-1.5.5-cp38-cp38-win_amd64.whl", hash = "sha256:7f58fabdde8dcbe764cef5e1a7fcb440f2463c1bbbec1cf2a86ca7bc1f95184b"},
-    {file = "typed_ast-1.5.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10"},
-    {file = "typed_ast-1.5.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:622e4a006472b05cf6ef7f9f2636edc51bda670b7bbffa18d26b255269d3d814"},
-    {file = "typed_ast-1.5.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1efebbbf4604ad1283e963e8915daa240cb4bf5067053cf2f0baadc4d4fb51b8"},
-    {file = "typed_ast-1.5.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0aefdd66f1784c58f65b502b6cf8b121544680456d1cebbd300c2c813899274"},
-    {file = "typed_ast-1.5.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:48074261a842acf825af1968cd912f6f21357316080ebaca5f19abbb11690c8a"},
-    {file = "typed_ast-1.5.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:429ae404f69dc94b9361bb62291885894b7c6fb4640d561179548c849f8492ba"},
-    {file = "typed_ast-1.5.5-cp39-cp39-win_amd64.whl", hash = "sha256:335f22ccb244da2b5c296e6f96b06ee9bed46526db0de38d2f0e5a6597b81155"},
-    {file = "typed_ast-1.5.5.tar.gz", hash = "sha256:94282f7a354f36ef5dbce0ef3467ebf6a258e370ab33d5b40c249fa996e590dd"},
-]
-
 [[package]]
 name = "typing-extensions"
-version = "4.7.1"
-description = "Backported and Experimental Type Hints for Python 3.7+"
+version = "4.9.0"
+description = "Backported and Experimental Type Hints for Python 3.8+"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
-    {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
+    {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"},
+    {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"},
 ]
 
-[[package]]
-name = "zipp"
-version = "3.15.0"
-description = "Backport of pathlib-compatible object wrapper for zip files"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
-    {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
-]
-
-[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
-
 [metadata]
 lock-version = "2.0"
-python-versions = "^3.7"
-content-hash = "4ac84aae6b05a415337279868d39fe7ce44104dffcd6b159af9e12dcea350ba1"
+python-versions = "^3.8"
+content-hash = "07e17d1cfc967cad4c92d37c316ba472010f73710fdaf1e34ed68f2b1b3487de"
diff --git a/pyproject.toml b/pyproject.toml
index 4ed337cd0..f91a2a89f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cohere"
-version = "5.0.0a3"
+version = "5.0.0a4"
 description = ""
 readme = "README.md"
 authors = []
@@ -9,9 +9,10 @@ packages = [
 ]
 
 [tool.poetry.dependencies]
-python = "^3.7"
+python = "^3.8"
 httpx = ">=0.21.2"
-pydantic = ">= 1.9.2, < 2.5.0"
+pydantic = ">= 1.9.2"
+typing_extensions = ">= 4.0.0"
 
 [tool.poetry.dev-dependencies]
 mypy = "0.971"
diff --git a/src/cohere/client.py b/src/cohere/client.py
index bb002161d..d4d33d4a7 100644
--- a/src/cohere/client.py
+++ b/src/cohere/client.py
@@ -6,11 +6,12 @@
 from json.decoder import JSONDecodeError
 
 import httpx
-import typing_extensions
 
 from .core.api_error import ApiError
 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .core.jsonable_encoder import jsonable_encoder
+from .core.remove_none_from_dict import remove_none_from_dict
+from .core.request_options import RequestOptions
 from .environment import ClientEnvironment
 from .errors.bad_request_error import BadRequestError
 from .errors.internal_server_error import InternalServerError
@@ -84,7 +85,6 @@ def chat_stream(
         *,
         message: str,
         model: typing.Optional[str] = OMIT,
-        stream: typing_extensions.Literal[True],
         preamble_override: typing.Optional[str] = OMIT,
         chat_history: typing.Optional[typing.List[ChatMessage]] = OMIT,
         conversation_id: typing.Optional[str] = OMIT,
@@ -99,6 +99,7 @@ def chat_stream(
         p: typing.Optional[float] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[StreamedChatResponse]:
        """
        The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter.
@@ -115,8 +116,6 @@
            Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models).
 
-            - stream: typing_extensions.Literal[True].
-
            - preamble_override: typing.Optional[str]. When specified, the default Cohere preamble will be replaced with the provided one.
 
            - chat_history: typing.Optional[typing.List[ChatMessage]]. A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`.
@@ -164,6 +163,8 @@
            - frequency_penalty: typing.Optional[float]. Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
 
            - presence_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
        """
        _request: typing.Dict[str, typing.Any] = {"message": message, "stream": stream}
        if model is not OMIT:
@@ -175,7 +176,7 @@
        if conversation_id is not OMIT:
            _request["conversation_id"] = conversation_id
        if prompt_truncation is not OMIT:
-            _request["prompt_truncation"] = prompt_truncation
+            _request["prompt_truncation"] = prompt_truncation.value
        if connectors is not OMIT:
            _request["connectors"] = connectors
        if search_queries_only is not OMIT:
@@ -183,7 +184,7 @@
        if documents is not OMIT:
            _request["documents"] = documents
        if citation_quality is not OMIT:
-            _request["citation_quality"] = citation_quality
+            _request["citation_quality"] = citation_quality.value
        if temperature is not OMIT:
            _request["temperature"] = temperature
        if max_tokens is not OMIT:
@@ -198,10 +199,27 @@
            _request["presence_penalty"] = presence_penalty
        with self._client_wrapper.httpx_client.stream(
            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/chat"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "chat"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
        ) as _response:
            if 200 <= _response.status_code < 300:
                for _text in _response.iter_lines():
@@ -223,7 +241,6 @@ def chat(
        *,
        message: str,
        model: typing.Optional[str] = OMIT,
-        stream: typing_extensions.Literal[False],
        preamble_override: typing.Optional[str] = OMIT,
        chat_history: typing.Optional[typing.List[ChatMessage]] = OMIT,
        conversation_id: typing.Optional[str] = OMIT,
@@ -238,6 +255,7 @@ def chat(
        p: typing.Optional[float] = OMIT,
        frequency_penalty: typing.Optional[float] = OMIT,
        presence_penalty: typing.Optional[float] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> NonStreamedChatResponse:
        """
        The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter.
@@ -254,8 +272,6 @@
            Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models).
 
-            - stream: typing_extensions.Literal[False].
-
            - preamble_override: typing.Optional[str]. When specified, the default Cohere preamble will be replaced with the provided one.
 
            - chat_history: typing.Optional[typing.List[ChatMessage]]. A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`.
@@ -303,15 +319,10 @@
            - frequency_penalty: typing.Optional[float]. Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
 
            - presence_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
        ---
-        from cohere import (
-            ChatMessage,
-            ChatMessageRole,
-            ChatRequestCitationQuality,
-            ChatRequestPromptOverride,
-            ChatRequestPromptTruncation,
-            ChatRequestSearchOptions,
-        )
+        from cohere import ChatMessage, ChatMessageRole, ChatRequestPromptTruncation
        from cohere.client import Client
 
        client = Client(
@@ -330,12 +341,13 @@
                    role=ChatMessageRole.CHATBOT,
                    message="How can I help you today?",
                ),
+                ChatMessage(
+                    role=ChatMessageRole.CHATBOT,
+                    message="message",
+                ),
            ],
            prompt_truncation=ChatRequestPromptTruncation.OFF,
-            citation_quality=ChatRequestCitationQuality.FAST,
            temperature=0.3,
-            search_options=ChatRequestSearchOptions(),
-            prompt_override=ChatRequestPromptOverride(),
        )
        """
        _request: typing.Dict[str, typing.Any] = {"message": message, "stream": stream}
@@ -348,7 +360,7 @@
        if conversation_id is not OMIT:
            _request["conversation_id"] = conversation_id
        if prompt_truncation is not OMIT:
-            _request["prompt_truncation"] = prompt_truncation
+            _request["prompt_truncation"] = prompt_truncation.value
        if connectors is not OMIT:
            _request["connectors"] = connectors
        if search_queries_only is not OMIT:
@@ -356,7 +368,7 @@
        if documents is not OMIT:
            _request["documents"] = documents
        if citation_quality is not OMIT:
-            _request["citation_quality"] = citation_quality
+            _request["citation_quality"] = citation_quality.value
        if temperature is not OMIT:
            _request["temperature"] = temperature
        if max_tokens is not OMIT:
@@ -371,10 +383,27 @@
            _request["presence_penalty"] = presence_penalty
        _response = self._client_wrapper.httpx_client.request(
            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/chat"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "chat"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(NonStreamedChatResponse, _response.json())  # type: ignore
@@ -392,7 +421,6 @@ def generate_stream(
        prompt: str,
        model: typing.Optional[str] = OMIT,
        num_generations: typing.Optional[int] = OMIT,
-        stream: typing_extensions.Literal[True],
        max_tokens: typing.Optional[int] = OMIT,
        truncate: typing.Optional[GenerateStreamRequestTruncate] = OMIT,
        temperature: typing.Optional[float] = OMIT,
@@ -406,6 +434,7 @@ def generate_stream(
        return_likelihoods: typing.Optional[GenerateStreamRequestReturnLikelihoods] = OMIT,
        logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT,
        raw_prompting: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[GenerateStreamedResponse]:
        """
        This endpoint generates realistic text conditioned on a given input.
@@ -418,8 +447,6 @@
            Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
 
            - num_generations: typing.Optional[int]. The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`.
 
-            - stream: typing_extensions.Literal[True].
-
            - max_tokens: typing.Optional[int]. The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details.
@@ -468,6 +495,8 @@
            For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text.
 
            - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
        """
        _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": stream}
        if model is not OMIT:
            _request["model"] = model
        if num_generations is not OMIT:
            _request["num_generations"] = num_generations
        if max_tokens is not OMIT:
            _request["max_tokens"] = max_tokens
        if truncate is not OMIT:
-            _request["truncate"] = truncate
+            _request["truncate"] = truncate.value
        if temperature is not OMIT:
            _request["temperature"] = temperature
        if preset is not OMIT:
@@ -495,17 +524,34 @@
        if presence_penalty is not OMIT:
            _request["presence_penalty"] = presence_penalty
        if return_likelihoods is not OMIT:
-            _request["return_likelihoods"] = return_likelihoods
+            _request["return_likelihoods"] = return_likelihoods.value
        if logit_bias is not OMIT:
            _request["logit_bias"] = logit_bias
        if raw_prompting is not OMIT:
            _request["raw_prompting"] = raw_prompting
        with self._client_wrapper.httpx_client.stream(
            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "generate"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
        ) as _response:
            if 200 <= _response.status_code < 300:
                for _text in _response.iter_lines():
@@ -532,7 +578,6 @@ def generate(
        prompt: str,
        model: typing.Optional[str] = OMIT,
        num_generations: typing.Optional[int] = OMIT,
-        stream: typing_extensions.Literal[False],
        max_tokens: typing.Optional[int] = OMIT,
        truncate: typing.Optional[GenerateRequestTruncate] = OMIT,
        temperature: typing.Optional[float] = OMIT,
@@ -546,6 +591,7 @@ def generate(
        return_likelihoods: typing.Optional[GenerateRequestReturnLikelihoods] = OMIT,
        logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT,
        raw_prompting: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
    ) -> Generation:
        """
        This endpoint generates realistic text conditioned on a given input.
@@ -558,8 +604,6 @@
            Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
 
            - num_generations: typing.Optional[int]. The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`.
 
-            - stream: typing_extensions.Literal[False].
-
            - max_tokens: typing.Optional[int]. The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details.
@@ -608,8 +652,9 @@ def generate( For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere import GenerateRequestReturnLikelihoods, GenerateRequestTruncate from cohere.client import Client client = Client( @@ -619,9 +664,7 @@ def generate( client.generate( prompt="Please explain to me how LLMs work", stream=False, - truncate=GenerateRequestTruncate.NONE, preset="my-preset-a58sbd", - return_likelihoods=GenerateRequestReturnLikelihoods.GENERATION, ) """ _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": stream} @@ -632,7 +675,7 @@ def generate( if max_tokens is not OMIT: _request["max_tokens"] = max_tokens if truncate is not OMIT: - _request["truncate"] = truncate + _request["truncate"] = truncate.value if temperature is not OMIT: _request["temperature"] = temperature if preset is not OMIT: @@ -650,17 +693,34 @@ def generate( if presence_penalty is not OMIT: _request["presence_penalty"] = presence_penalty if return_likelihoods is not OMIT: - _request["return_likelihoods"] = return_likelihoods + _request["return_likelihoods"] = return_likelihoods.value if logit_bias is not OMIT: _request["logit_bias"] = logit_bias if raw_prompting is not OMIT: _request["raw_prompting"] = raw_prompting _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "generate"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(Generation, _response.json()) # type: ignore @@ -684,6 +744,7 @@ def embed( input_type: typing.Optional[EmbedInputType] = OMIT, embedding_types: typing.Optional[typing.List[EmbedRequestEmbeddingTypesItem]] = OMIT, truncate: typing.Optional[EmbedRequestTruncate] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> EmbedResponse: """ This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. @@ -723,22 +784,40 @@ def embed( Passing `START` will discard the start of the input. `END` will discard the end of the input. 
In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. """ _request: typing.Dict[str, typing.Any] = {"texts": texts} if model is not OMIT: _request["model"] = model if input_type is not OMIT: - _request["input_type"] = input_type + _request["input_type"] = input_type.value if embedding_types is not OMIT: _request["embedding_types"] = embedding_types if truncate is not OMIT: - _request["truncate"] = truncate + _request["truncate"] = truncate.value _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/embed"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "embed"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(EmbedResponse, _response.json()) # type: ignore @@ -763,6 +842,7 @@ def rerank( top_n: typing.Optional[int] = OMIT, return_documents: typing.Optional[bool] = OMIT, max_chunks_per_doc: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> RerankResponse: """ This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. @@ -783,6 +863,8 @@ def rerank( - return_documents: typing.Optional[bool]. - If false, returns results without the doc text - the api will return a list of {index, relevance score} where index is inferred from the list passed into the request. - If true, returns results with the doc text passed in - the api will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request. - max_chunks_per_doc: typing.Optional[int]. The maximum number of chunks to produce internally from a document + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import Client @@ -807,10 +889,27 @@ def rerank( _request["max_chunks_per_doc"] = max_chunks_per_doc _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/rerank"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "rerank"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(RerankResponse, _response.json()) # type: ignore @@ -830,6 +929,7 @@ def classify( model: typing.Optional[str] = OMIT, preset: typing.Optional[str] = OMIT, truncate: typing.Optional[ClassifyRequestTruncate] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> ClassifyResponse: """ This endpoint makes a prediction about which label fits the specified text inputs best. To make a prediction, Classify uses the provided `examples` of text + label pairs as a reference. @@ -847,8 +947,10 @@ def classify( - truncate: typing.Optional[ClassifyRequestTruncate]. One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. - If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.--- - from cohere import ClassifyExample, ClassifyRequestTruncate + If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
+ --- + from cohere import ClassifyExample from cohere.client import Client client = Client( @@ -856,7 +958,11 @@ def classify( token="YOUR_TOKEN", ) client.classify( - inputs=["Confirm your email address", "hey i need u to send some $"], + inputs=[ + "Confirm your email address", + "hey i need u to send some $", + "inputs", + ], examples=[ ClassifyExample( text="Dermatologists don't like her!", @@ -898,9 +1004,9 @@ def classify( text="Pre-read for tomorrow", label="Not spam", ), + ClassifyExample(), ], preset="my-preset-a58sbd", - truncate=ClassifyRequestTruncate.NONE, ) """ _request: typing.Dict[str, typing.Any] = {"inputs": inputs, "examples": examples} @@ -909,13 +1015,30 @@ def classify( if preset is not OMIT: _request["preset"] = preset if truncate is not OMIT: - _request["truncate"] = truncate + _request["truncate"] = truncate.value _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/classify"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "classify"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(ClassifyResponse, _response.json()) # type: ignore @@ -941,6 +1064,7 @@ def summarize( extractiveness: typing.Optional[SummarizeRequestExtractiveness] = OMIT, temperature: typing.Optional[float] = OMIT, additional_command: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> SummarizeResponse: """ This endpoint generates a summary in English for a given text. @@ -959,36 +1083,53 @@ def summarize( - temperature: typing.Optional[float]. Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. - additional_command: typing.Optional[str]. A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. "focusing on the next steps" or "written by Yoda" + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere import (SummarizeRequestExtractiveness, SummarizeRequestFormat, - SummarizeRequestLength) from cohere.client import Client client = Client(client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) client.summarize(text='Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. 
It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases. The meaning of the name "ice cream" varies from one country to another. In some countries, such as the United States, "ice cream" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled "frozen dairy dessert" instead. In other countries, such as Italy and Argentina, one word is used fo - all variants. Analogues made from dairy alternatives, such as goat"s or sheep"s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.', length=SummarizeRequestLength.SHORT, format=SummarizeRequestFormat.PARAGRAPH, extractiveness=SummarizeRequestExtractiveness.LOW, ) + all variants. Analogues made from dairy alternatives, such as goat"s or sheep"s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.', ) """ _request: typing.Dict[str, typing.Any] = {"text": text} if length is not OMIT: - _request["length"] = length + _request["length"] = length.value if format is not OMIT: - _request["format"] = format + _request["format"] = format.value if model is not OMIT: _request["model"] = model if extractiveness is not OMIT: - _request["extractiveness"] = extractiveness + _request["extractiveness"] = extractiveness.value if temperature is not OMIT: _request["temperature"] = temperature if additional_command is not OMIT: _request["additional_command"] = additional_command _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/summarize"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "summarize"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(SummarizeResponse, _response.json()) # type: ignore @@ -1000,7 +1141,9 @@ def summarize( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def 
tokenize(self, *, text: str, model: typing.Optional[str] = OMIT) -> TokenizeResponse: + def tokenize( + self, *, text: str, model: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None + ) -> TokenizeResponse: """ This endpoint splits input text into smaller units called tokens using byte-pair encoding (BPE). To learn more about tokenization and byte pair encoding, see the tokens page. @@ -1008,6 +1151,8 @@ def tokenize(self, *, text: str, model: typing.Optional[str] = OMIT) -> Tokenize - text: str. The string to be tokenized, the minimum text length is 1 character, and the maximum text length is 65536 characters. - model: typing.Optional[str]. An optional parameter to provide the model name. This will ensure that the tokenization uses the tokenizer used by that model. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere.client import Client @@ -1025,10 +1170,27 @@ def tokenize(self, *, text: str, model: typing.Optional[str] = OMIT) -> Tokenize _request["model"] = model _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/tokenize"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "tokenize"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(TokenizeResponse, _response.json()) # type: ignore @@ -1044,7 +1206,13 @@ def tokenize(self, *, text: str, model: typing.Optional[str] = OMIT) -> Tokenize raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def detokenize(self, *, tokens: typing.List[int], model: typing.Optional[str] = OMIT) -> DetokenizeResponse: + def detokenize( + self, + *, + tokens: typing.List[int], + model: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DetokenizeResponse: """ This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page. @@ -1052,6 +1220,8 @@ def detokenize(self, *, tokens: typing.List[int], model: typing.Optional[str] = - tokens: typing.List[int]. The list of tokens to be detokenized. - model: typing.Optional[str]. An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import Client @@ -1060,7 +1230,7 @@ def detokenize(self, *, tokens: typing.List[int], model: typing.Optional[str] = token="YOUR_TOKEN", ) client.detokenize( - tokens=[10104, 12221, 1315, 34, 1420, 69], + tokens=[10104, 12221, 1315, 34, 1420, 69, 1], ) """ _request: typing.Dict[str, typing.Any] = {"tokens": tokens} @@ -1068,10 +1238,27 @@ def detokenize(self, *, tokens: typing.List[int], model: typing.Optional[str] = _request["model"] = model _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/detokenize"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "detokenize"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DetokenizeResponse, _response.json()) # type: ignore @@ -1110,7 +1297,6 @@ async def chat_stream( *, message: str, model: typing.Optional[str] = OMIT, - stream: typing_extensions.Literal[True], preamble_override: typing.Optional[str] = OMIT, chat_history: typing.Optional[typing.List[ChatMessage]] = OMIT, conversation_id: typing.Optional[str] = OMIT, @@ -1125,6 +1311,7 @@ async def chat_stream( p: typing.Optional[float] = OMIT, frequency_penalty: typing.Optional[float] = OMIT, presence_penalty: typing.Optional[float] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[StreamedChatResponse]: """ The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. @@ -1141,8 +1328,6 @@ async def chat_stream( Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models). - - stream: typing_extensions.Literal[True]. - - preamble_override: typing.Optional[str]. When specified, the default Cohere preamble will be replaced with the provided one. - chat_history: typing.Optional[typing.List[ChatMessage]]. A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`. @@ -1190,6 +1375,8 @@ async def chat_stream( - frequency_penalty: typing.Optional[float]. Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. - presence_penalty: typing.Optional[float]. 
Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. """ _request: typing.Dict[str, typing.Any] = {"message": message, "stream": stream} if model is not OMIT: @@ -1201,7 +1388,7 @@ async def chat_stream( if conversation_id is not OMIT: _request["conversation_id"] = conversation_id if prompt_truncation is not OMIT: - _request["prompt_truncation"] = prompt_truncation + _request["prompt_truncation"] = prompt_truncation.value if connectors is not OMIT: _request["connectors"] = connectors if search_queries_only is not OMIT: @@ -1209,7 +1396,7 @@ async def chat_stream( if documents is not OMIT: _request["documents"] = documents if citation_quality is not OMIT: - _request["citation_quality"] = citation_quality + _request["citation_quality"] = citation_quality.value if temperature is not OMIT: _request["temperature"] = temperature if max_tokens is not OMIT: @@ -1224,10 +1411,27 @@ async def chat_stream( _request["presence_penalty"] = presence_penalty async with self._client_wrapper.httpx_client.stream( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/chat"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "chat"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) as _response: if 200 <= _response.status_code < 300: async for _text in _response.aiter_lines(): @@ -1249,7 +1453,6 @@ async def chat( *, message: str, model: typing.Optional[str] = OMIT, - stream: typing_extensions.Literal[False], preamble_override: typing.Optional[str] = OMIT, chat_history: typing.Optional[typing.List[ChatMessage]] = OMIT, conversation_id: typing.Optional[str] = OMIT, @@ -1264,6 +1467,7 @@ async def chat( p: typing.Optional[float] = OMIT, frequency_penalty: typing.Optional[float] = OMIT, presence_penalty: typing.Optional[float] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> NonStreamedChatResponse: """ The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. @@ -1280,8 +1484,6 @@ async def chat( Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models). 
- - stream: typing_extensions.Literal[False]. - - preamble_override: typing.Optional[str]. When specified, the default Cohere preamble will be replaced with the provided one. - chat_history: typing.Optional[typing.List[ChatMessage]]. A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`. @@ -1329,15 +1531,10 @@ async def chat( - frequency_penalty: typing.Optional[float]. Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. - presence_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere import ( - ChatMessage, - ChatMessageRole, - ChatRequestCitationQuality, - ChatRequestPromptOverride, - ChatRequestPromptTruncation, - ChatRequestSearchOptions, - ) + from cohere import ChatMessage, ChatMessageRole, ChatRequestPromptTruncation from cohere.client import AsyncClient client = AsyncClient( @@ -1356,12 +1553,13 @@ async def chat( role=ChatMessageRole.CHATBOT, message="How can I help you today?", ), + ChatMessage( + role=ChatMessageRole.CHATBOT, + message="message", + ), ], prompt_truncation=ChatRequestPromptTruncation.OFF, - citation_quality=ChatRequestCitationQuality.FAST, temperature=0.3, - search_options=ChatRequestSearchOptions(), - prompt_override=ChatRequestPromptOverride(), ) """ _request: typing.Dict[str, typing.Any] = {"message": message, "stream": stream} @@ -1374,7 +1572,7 @@ async def chat( if conversation_id is not OMIT: _request["conversation_id"] = conversation_id if prompt_truncation is not OMIT: - _request["prompt_truncation"] = prompt_truncation + _request["prompt_truncation"] = prompt_truncation.value if connectors is not OMIT: _request["connectors"] = connectors if search_queries_only is not OMIT: @@ -1382,7 +1580,7 @@ async def chat( if documents is not OMIT: _request["documents"] = documents if citation_quality is not OMIT: - _request["citation_quality"] = citation_quality + _request["citation_quality"] = citation_quality.value if temperature is not OMIT: _request["temperature"] = temperature if max_tokens is not OMIT: @@ -1397,10 +1595,27 @@ async def chat( _request["presence_penalty"] = presence_penalty _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/chat"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "chat"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if 
request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(NonStreamedChatResponse, _response.json()) # type: ignore @@ -1418,7 +1633,6 @@ async def generate_stream( prompt: str, model: typing.Optional[str] = OMIT, num_generations: typing.Optional[int] = OMIT, - stream: typing_extensions.Literal[True], max_tokens: typing.Optional[int] = OMIT, truncate: typing.Optional[GenerateStreamRequestTruncate] = OMIT, temperature: typing.Optional[float] = OMIT, @@ -1432,6 +1646,7 @@ async def generate_stream( return_likelihoods: typing.Optional[GenerateStreamRequestReturnLikelihoods] = OMIT, logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT, raw_prompting: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[GenerateStreamedResponse]: """ This endpoint generates realistic text conditioned on a given input. @@ -1444,8 +1659,6 @@ async def generate_stream( Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. - num_generations: typing.Optional[int]. The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`. - - stream: typing_extensions.Literal[True]. - - max_tokens: typing.Optional[int]. The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details. @@ -1494,6 +1707,8 @@ async def generate_stream( For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
""" _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": stream} if model is not OMIT: @@ -1503,7 +1718,7 @@ async def generate_stream( if max_tokens is not OMIT: _request["max_tokens"] = max_tokens if truncate is not OMIT: - _request["truncate"] = truncate + _request["truncate"] = truncate.value if temperature is not OMIT: _request["temperature"] = temperature if preset is not OMIT: @@ -1521,17 +1736,34 @@ async def generate_stream( if presence_penalty is not OMIT: _request["presence_penalty"] = presence_penalty if return_likelihoods is not OMIT: - _request["return_likelihoods"] = return_likelihoods + _request["return_likelihoods"] = return_likelihoods.value if logit_bias is not OMIT: _request["logit_bias"] = logit_bias if raw_prompting is not OMIT: _request["raw_prompting"] = raw_prompting async with self._client_wrapper.httpx_client.stream( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "generate"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) as _response: if 200 <= _response.status_code < 300: async for _text in _response.aiter_lines(): @@ -1558,7 +1790,6 @@ async def generate( prompt: str, model: typing.Optional[str] = OMIT, num_generations: typing.Optional[int] = OMIT, - stream: typing_extensions.Literal[False], max_tokens: typing.Optional[int] = OMIT, truncate: typing.Optional[GenerateRequestTruncate] = OMIT, temperature: typing.Optional[float] = OMIT, @@ -1572,6 +1803,7 @@ async def generate( return_likelihoods: typing.Optional[GenerateRequestReturnLikelihoods] = OMIT, logit_bias: typing.Optional[typing.Dict[str, float]] = OMIT, raw_prompting: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> Generation: """ This endpoint generates realistic text conditioned on a given input. @@ -1584,8 +1816,6 @@ async def generate( Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. - num_generations: typing.Optional[int]. The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`. - - stream: typing_extensions.Literal[False]. - - max_tokens: typing.Optional[int]. The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details. 
@@ -1634,8 +1864,9 @@ async def generate( For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere import GenerateRequestReturnLikelihoods, GenerateRequestTruncate from cohere.client import AsyncClient client = AsyncClient( @@ -1645,9 +1876,7 @@ async def generate( await client.generate( prompt="Please explain to me how LLMs work", stream=False, - truncate=GenerateRequestTruncate.NONE, preset="my-preset-a58sbd", - return_likelihoods=GenerateRequestReturnLikelihoods.GENERATION, ) """ _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": stream} @@ -1658,7 +1887,7 @@ async def generate( if max_tokens is not OMIT: _request["max_tokens"] = max_tokens if truncate is not OMIT: - _request["truncate"] = truncate + _request["truncate"] = truncate.value if temperature is not OMIT: _request["temperature"] = temperature if preset is not OMIT: @@ -1676,17 +1905,34 @@ async def generate( if presence_penalty is not OMIT: _request["presence_penalty"] = presence_penalty if return_likelihoods is not OMIT: - _request["return_likelihoods"] = return_likelihoods + _request["return_likelihoods"] = return_likelihoods.value if logit_bias is not OMIT: _request["logit_bias"] = logit_bias if raw_prompting is not OMIT: _request["raw_prompting"] = raw_prompting _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/generate"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "generate"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(Generation, _response.json()) # type: ignore @@ -1710,6 +1956,7 @@ async def embed( input_type: typing.Optional[EmbedInputType] = OMIT, embedding_types: typing.Optional[typing.List[EmbedRequestEmbeddingTypesItem]] = OMIT, truncate: typing.Optional[EmbedRequestTruncate] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> EmbedResponse: """ This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. 
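The header handling added throughout has a similar shape: per-request additional_headers are laid over the client-wrapper headers, then None values are stripped. A self-contained sketch follows, with a local stand-in for cohere.core.remove_none_from_dict (assumed here to drop None-valued keys) and header names taken from the client_wrapper change later in this patch:

import typing

def remove_none_from_dict(original: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
    # Local stand-in for cohere.core.remove_none_from_dict: drops None values.
    return {key: value for key, value in original.items() if value is not None}

# Header names taken from BaseClientWrapper.get_headers() in this patch.
base_headers: typing.Dict[str, typing.Any] = {
    "X-Fern-Language": "Python",
    "X-Fern-SDK-Name": "cohere",
}
request_options: typing.Optional[typing.Dict[str, typing.Any]] = {
    "additional_headers": {"X-Trace-Id": "example-trace", "X-Unset": None},
}

# Same expression shape as the generated methods above.
headers = remove_none_from_dict(
    {
        **base_headers,
        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
    }
)
assert headers == {
    "X-Fern-Language": "Python",
    "X-Fern-SDK-Name": "cohere",
    "X-Trace-Id": "example-trace",
}
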
@@ -1749,22 +1996,40 @@ async def embed( Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. """ _request: typing.Dict[str, typing.Any] = {"texts": texts} if model is not OMIT: _request["model"] = model if input_type is not OMIT: - _request["input_type"] = input_type + _request["input_type"] = input_type.value if embedding_types is not OMIT: _request["embedding_types"] = embedding_types if truncate is not OMIT: - _request["truncate"] = truncate + _request["truncate"] = truncate.value _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/embed"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "embed"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(EmbedResponse, _response.json()) # type: ignore @@ -1789,6 +2054,7 @@ async def rerank( top_n: typing.Optional[int] = OMIT, return_documents: typing.Optional[bool] = OMIT, max_chunks_per_doc: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> RerankResponse: """ This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. @@ -1809,6 +2075,8 @@ async def rerank( - return_documents: typing.Optional[bool]. - If false, returns results without the doc text - the api will return a list of {index, relevance score} where index is inferred from the list passed into the request. - If true, returns results with the doc text passed in - the api will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request. - max_chunks_per_doc: typing.Optional[int]. The maximum number of chunks to produce internally from a document + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import AsyncClient @@ -1833,10 +2101,27 @@ async def rerank( _request["max_chunks_per_doc"] = max_chunks_per_doc _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/rerank"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "rerank"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(RerankResponse, _response.json()) # type: ignore @@ -1856,6 +2141,7 @@ async def classify( model: typing.Optional[str] = OMIT, preset: typing.Optional[str] = OMIT, truncate: typing.Optional[ClassifyRequestTruncate] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> ClassifyResponse: """ This endpoint makes a prediction about which label fits the specified text inputs best. To make a prediction, Classify uses the provided `examples` of text + label pairs as a reference. @@ -1873,8 +2159,10 @@ async def classify( - truncate: typing.Optional[ClassifyRequestTruncate]. One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. - If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.--- - from cohere import ClassifyExample, ClassifyRequestTruncate + If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
+ --- + from cohere import ClassifyExample from cohere.client import AsyncClient client = AsyncClient( @@ -1882,7 +2170,11 @@ async def classify( token="YOUR_TOKEN", ) await client.classify( - inputs=["Confirm your email address", "hey i need u to send some $"], + inputs=[ + "Confirm your email address", + "hey i need u to send some $", + "inputs", + ], examples=[ ClassifyExample( text="Dermatologists don't like her!", @@ -1924,9 +2216,9 @@ async def classify( text="Pre-read for tomorrow", label="Not spam", ), + ClassifyExample(), ], preset="my-preset-a58sbd", - truncate=ClassifyRequestTruncate.NONE, ) """ _request: typing.Dict[str, typing.Any] = {"inputs": inputs, "examples": examples} @@ -1935,13 +2227,30 @@ async def classify( if preset is not OMIT: _request["preset"] = preset if truncate is not OMIT: - _request["truncate"] = truncate + _request["truncate"] = truncate.value _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/classify"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "classify"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(ClassifyResponse, _response.json()) # type: ignore @@ -1967,6 +2276,7 @@ async def summarize( extractiveness: typing.Optional[SummarizeRequestExtractiveness] = OMIT, temperature: typing.Optional[float] = OMIT, additional_command: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> SummarizeResponse: """ This endpoint generates a summary in English for a given text. @@ -1985,36 +2295,53 @@ async def summarize( - temperature: typing.Optional[float]. Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. - additional_command: typing.Optional[str]. A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. "focusing on the next steps" or "written by Yoda" + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere import (SummarizeRequestExtractiveness, SummarizeRequestFormat, - SummarizeRequestLength) from cohere.client import AsyncClient client = AsyncClient(client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) await client.summarize(text='Ice cream is a sweetened frozen food typically eaten as a snack or dessert. 
It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases. The meaning of the name "ice cream" varies from one country to another. In some countries, such as the United States, "ice cream" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled "frozen dairy dessert" instead. In other countries, such as Italy and Argentina, one word is used fo - all variants. Analogues made from dairy alternatives, such as goat"s or sheep"s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.', length=SummarizeRequestLength.SHORT, format=SummarizeRequestFormat.PARAGRAPH, extractiveness=SummarizeRequestExtractiveness.LOW, ) + all variants. Analogues made from dairy alternatives, such as goat"s or sheep"s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.', ) """ _request: typing.Dict[str, typing.Any] = {"text": text} if length is not OMIT: - _request["length"] = length + _request["length"] = length.value if format is not OMIT: - _request["format"] = format + _request["format"] = format.value if model is not OMIT: _request["model"] = model if extractiveness is not OMIT: - _request["extractiveness"] = extractiveness + _request["extractiveness"] = extractiveness.value if temperature is not OMIT: _request["temperature"] = temperature if additional_command is not OMIT: _request["additional_command"] = additional_command _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/summarize"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "summarize"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(SummarizeResponse, _response.json()) # type: ignore 
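A hypothetical sketch of calling one of the regenerated async endpoints with per-request options; the tokenize wiring it exercises appears in the hunks that follow. The text and query-parameter values are placeholders, and the partial RequestOptions literal assumes the TypedDict's fields are optional, as the NotRequired import in the new request_options.py suggests.

import asyncio

from cohere.client import AsyncClient
from cohere.core import RequestOptions

async def main() -> None:
    client = AsyncClient(
        client_name="YOUR_CLIENT_NAME",
        token="YOUR_TOKEN",
    )
    options: RequestOptions = {
        "timeout_in_seconds": 30,
        # Sent via the params= argument the regenerated calls now build;
        # the key and value here are purely illustrative.
        "additional_query_parameters": {"debug": "true"},
    }
    response = await client.tokenize(
        text="tokenize me!",  # placeholder text
        request_options=options,
    )
    print(response)

asyncio.run(main())
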
@@ -2026,7 +2353,9 @@ async def summarize( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def tokenize(self, *, text: str, model: typing.Optional[str] = OMIT) -> TokenizeResponse: + async def tokenize( + self, *, text: str, model: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None + ) -> TokenizeResponse: """ This endpoint splits input text into smaller units called tokens using byte-pair encoding (BPE). To learn more about tokenization and byte pair encoding, see the tokens page. @@ -2034,6 +2363,8 @@ async def tokenize(self, *, text: str, model: typing.Optional[str] = OMIT) -> To - text: str. The string to be tokenized, the minimum text length is 1 character, and the maximum text length is 65536 characters. - model: typing.Optional[str]. An optional parameter to provide the model name. This will ensure that the tokenization uses the tokenizer used by that model. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere.client import AsyncClient @@ -2051,10 +2382,27 @@ async def tokenize(self, *, text: str, model: typing.Optional[str] = OMIT) -> To _request["model"] = model _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/tokenize"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "tokenize"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(TokenizeResponse, _response.json()) # type: ignore @@ -2070,7 +2418,13 @@ async def tokenize(self, *, text: str, model: typing.Optional[str] = OMIT) -> To raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def detokenize(self, *, tokens: typing.List[int], model: typing.Optional[str] = OMIT) -> DetokenizeResponse: + async def detokenize( + self, + *, + tokens: typing.List[int], + model: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DetokenizeResponse: """ This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page. @@ -2078,6 +2432,8 @@ async def detokenize(self, *, tokens: typing.List[int], model: typing.Optional[s - tokens: typing.List[int]. The list of tokens to be detokenized. - model: typing.Optional[str]. An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model. 
+ + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere.client import AsyncClient @@ -2086,7 +2442,7 @@ async def detokenize(self, *, tokens: typing.List[int], model: typing.Optional[s token="YOUR_TOKEN", ) await client.detokenize( - tokens=[10104, 12221, 1315, 34, 1420, 69], + tokens=[10104, 12221, 1315, 34, 1420, 69, 1], ) """ _request: typing.Dict[str, typing.Any] = {"tokens": tokens} @@ -2094,10 +2450,27 @@ async def detokenize(self, *, tokens: typing.List[int], model: typing.Optional[s _request["model"] = model _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/detokenize"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "detokenize"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DetokenizeResponse, _response.json()) # type: ignore diff --git a/src/cohere/core/__init__.py b/src/cohere/core/__init__.py index 24149550b..e42d263c2 100644 --- a/src/cohere/core/__init__.py +++ b/src/cohere/core/__init__.py @@ -5,11 +5,13 @@ from .datetime_utils import serialize_datetime from .jsonable_encoder import jsonable_encoder from .remove_none_from_dict import remove_none_from_dict +from .request_options import RequestOptions __all__ = [ "ApiError", "AsyncClientWrapper", "BaseClientWrapper", + "RequestOptions", "SyncClientWrapper", "jsonable_encoder", "remove_none_from_dict", diff --git a/src/cohere/core/client_wrapper.py b/src/cohere/core/client_wrapper.py index 79bdc5b50..7b48892e9 100644 --- a/src/cohere/core/client_wrapper.py +++ b/src/cohere/core/client_wrapper.py @@ -21,7 +21,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "cohere", - "X-Fern-SDK-Version": "5.0.0a3", + "X-Fern-SDK-Version": "5.0.0a4", } if self._client_name is not None: headers["X-Client-Name"] = self._client_name diff --git a/src/cohere/core/request_options.py b/src/cohere/core/request_options.py new file mode 100644 index 000000000..32e86b03a --- /dev/null +++ b/src/cohere/core/request_options.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +try: + from typing import NotRequired # type: ignore +except ImportError: + from typing_extensions import NotRequired # type: ignore + + +class RequestOptions(typing.TypedDict): + """ + Additional options for request-specific configuration when calling APIs via the SDK. + This is used primarily as an optional final parameter for service functions. + + Attributes: + - timeout_in_seconds: int. 
The number of seconds to await an API call before timing out. + + - additional_headers: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's header dict + + - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict + + - additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict + """ + + timeout_in_seconds: NotRequired[int] + additional_headers: NotRequired[typing.Dict[str, typing.Any]] + additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]] + additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]] diff --git a/src/cohere/environment.py b/src/cohere/environment.py index 1f8e3da1e..195c6937f 100644 --- a/src/cohere/environment.py +++ b/src/cohere/environment.py @@ -4,4 +4,4 @@ class ClientEnvironment(enum.Enum): - PRODUCTION = "https://api.cohere.ai" + PRODUCTION = "https://api.cohere.ai/v1" diff --git a/src/cohere/resources/connectors/client.py b/src/cohere/resources/connectors/client.py index caf9d3b87..fec0253a5 100644 --- a/src/cohere/resources/connectors/client.py +++ b/src/cohere/resources/connectors/client.py @@ -8,6 +8,7 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ...core.jsonable_encoder import jsonable_encoder from ...core.remove_none_from_dict import remove_none_from_dict +from ...core.request_options import RequestOptions from ...errors.bad_request_error import BadRequestError from ...errors.forbidden_error import ForbiddenError from ...errors.internal_server_error import InternalServerError @@ -36,7 +37,11 @@ def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper def list( - self, *, limit: typing.Optional[float] = None, offset: typing.Optional[float] = None + self, + *, + limit: typing.Optional[float] = None, + offset: typing.Optional[float] = None, + request_options: typing.Optional[RequestOptions] = None, ) -> ListConnectorsResponse: """ Returns a list of connectors ordered by descending creation date (newer first). See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. @@ -45,6 +50,8 @@ def list( - limit: typing.Optional[float]. Maximum number of connectors to return [0, 100]. - offset: typing.Optional[float]. Number of connectors to skip before returning results [0, inf]. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import Client @@ -56,10 +63,31 @@ def list( """ _response = self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/connectors"), - params=remove_none_from_dict({"limit": limit, "offset": offset}), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "connectors"), + params=jsonable_encoder( + remove_none_from_dict( + { + "limit": limit, + "offset": offset, + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + } + ) + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(ListConnectorsResponse, _response.json()) # type: ignore @@ -86,6 +114,7 @@ def create( active: typing.Optional[bool] = OMIT, continue_on_failure: typing.Optional[bool] = OMIT, service_auth: typing.Optional[CreateConnectorServiceAuth] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> CreateConnectorResponse: """ Creates a new connector. The connector is tested during registration and will cancel registration when the test is unsuccessful. See ['Creating and Deploying a Connector'](https://docs.cohere.com/docs/creating-and-deploying-a-connector) for more information. @@ -106,12 +135,9 @@ def create( - continue_on_failure: typing.Optional[bool]. Whether a chat request should continue or not if the request to this connector fails. - service_auth: typing.Optional[CreateConnectorServiceAuth]. The service to service authentication configuration for the connector. Cannot be specified if oauth is specified. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- - from cohere import ( - AuthTokenType, - CreateConnectorOAuth, - CreateConnectorServiceAuth, - ) from cohere.client import Client client = Client( @@ -119,13 +145,8 @@ def create( token="YOUR_TOKEN", ) client.connectors.create( - name="string", - url="string", - oauth=CreateConnectorOAuth(), - service_auth=CreateConnectorServiceAuth( - type=AuthTokenType.BEARER, - token="string", - ), + name="name", + url="url", ) """ _request: typing.Dict[str, typing.Any] = {"name": name, "url": url} @@ -143,10 +164,27 @@ def create( _request["service_auth"] = service_auth _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/connectors"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "connectors"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(CreateConnectorResponse, _response.json()) # type: ignore @@ -164,12 +202,14 @@ def create( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get(self, id: str) -> GetConnectorResponse: + def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> GetConnectorResponse: """ Retrieve a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. Parameters: - id: str. The ID of the connector to retrieve. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import Client @@ -178,14 +218,26 @@ def get(self, id: str) -> GetConnectorResponse: token="YOUR_TOKEN", ) client.connectors.get( - id="string", + id="id", ) """ _response = self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/connectors/{id}"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"connectors/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(GetConnectorResponse, _response.json()) # type: ignore @@ -203,12 +255,14 @@ def get(self, id: str) -> GetConnectorResponse: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def delete(self, id: str) -> DeleteConnectorResponse: + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> DeleteConnectorResponse: """ Delete a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. Parameters: - id: str. The ID of the connector to delete. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere.client import Client @@ -217,14 +271,26 @@ def delete(self, id: str) -> DeleteConnectorResponse: token="YOUR_TOKEN", ) client.connectors.delete( - id="string", + id="id", ) """ _response = self._client_wrapper.httpx_client.request( "DELETE", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/connectors/{id}"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"connectors/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DeleteConnectorResponse, _response.json()) # type: ignore @@ -255,6 +321,7 @@ def update( active: typing.Optional[bool] = OMIT, continue_on_failure: typing.Optional[bool] = OMIT, service_auth: typing.Optional[CreateConnectorServiceAuth] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> UpdateConnectorResponse: """ Update a connector by ID. Omitted fields will not be updated. See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. @@ -275,12 +342,9 @@ def update( - continue_on_failure: typing.Optional[bool]. - service_auth: typing.Optional[CreateConnectorServiceAuth]. The service to service authentication configuration for the connector. Cannot be specified if oauth is specified. 
+ + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere import ( - AuthTokenType, - CreateConnectorOAuth, - CreateConnectorServiceAuth, - ) from cohere.client import Client client = Client( @@ -288,12 +352,7 @@ def update( token="YOUR_TOKEN", ) client.connectors.update( - id="string", - oauth=CreateConnectorOAuth(), - service_auth=CreateConnectorServiceAuth( - type=AuthTokenType.BEARER, - token="string", - ), + id="id", ) """ _request: typing.Dict[str, typing.Any] = {} @@ -313,10 +372,27 @@ def update( _request["service_auth"] = service_auth _response = self._client_wrapper.httpx_client.request( "PATCH", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/connectors/{id}"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"connectors/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(UpdateConnectorResponse, _response.json()) # type: ignore @@ -336,7 +412,13 @@ def update( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def o_auth_authorize(self, id: str, *, after_token_redirect: typing.Optional[str] = None) -> OAuthAuthorizeResponse: + def o_auth_authorize( + self, + id: str, + *, + after_token_redirect: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> OAuthAuthorizeResponse: """ Authorize the connector with the given ID for the connector oauth app. See ['Connector Authentication'](https://docs.cohere.com/docs/connector-authentication) for more information. @@ -344,6 +426,8 @@ def o_auth_authorize(self, id: str, *, after_token_redirect: typing.Optional[str - id: str. The ID of the connector to authorize. - after_token_redirect: typing.Optional[str]. The URL to redirect to after the connector has been authorized. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import Client @@ -352,15 +436,38 @@ def o_auth_authorize(self, id: str, *, after_token_redirect: typing.Optional[str token="YOUR_TOKEN", ) client.connectors.o_auth_authorize( - id="string", + id="id", ) """ _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/connectors/{id}/oauth/authorize"), - params=remove_none_from_dict({"after_token_redirect": after_token_redirect}), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"connectors/{id}/oauth/authorize"), + params=jsonable_encoder( + remove_none_from_dict( + { + "after_token_redirect": after_token_redirect, + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + } + ) + ), + json=jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {}))) + if request_options is not None + else None, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(OAuthAuthorizeResponse, _response.json()) # type: ignore @@ -384,7 +491,11 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper async def list( - self, *, limit: typing.Optional[float] = None, offset: typing.Optional[float] = None + self, + *, + limit: typing.Optional[float] = None, + offset: typing.Optional[float] = None, + request_options: typing.Optional[RequestOptions] = None, ) -> ListConnectorsResponse: """ Returns a list of connectors ordered by descending creation date (newer first). See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. @@ -393,6 +504,8 @@ async def list( - limit: typing.Optional[float]. Maximum number of connectors to return [0, 100]. - offset: typing.Optional[float]. Number of connectors to skip before returning results [0, inf]. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import AsyncClient @@ -404,10 +517,31 @@ async def list( """ _response = await self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/connectors"), - params=remove_none_from_dict({"limit": limit, "offset": offset}), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "connectors"), + params=jsonable_encoder( + remove_none_from_dict( + { + "limit": limit, + "offset": offset, + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + } + ) + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(ListConnectorsResponse, _response.json()) # type: ignore @@ -434,6 +568,7 @@ async def create( active: typing.Optional[bool] = OMIT, continue_on_failure: typing.Optional[bool] = OMIT, service_auth: typing.Optional[CreateConnectorServiceAuth] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> CreateConnectorResponse: """ Creates a new connector. The connector is tested during registration and will cancel registration when the test is unsuccessful. See ['Creating and Deploying a Connector'](https://docs.cohere.com/docs/creating-and-deploying-a-connector) for more information. @@ -454,12 +589,9 @@ async def create( - continue_on_failure: typing.Optional[bool]. Whether a chat request should continue or not if the request to this connector fails. - service_auth: typing.Optional[CreateConnectorServiceAuth]. The service to service authentication configuration for the connector. Cannot be specified if oauth is specified. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- - from cohere import ( - AuthTokenType, - CreateConnectorOAuth, - CreateConnectorServiceAuth, - ) from cohere.client import AsyncClient client = AsyncClient( @@ -467,13 +599,8 @@ async def create( token="YOUR_TOKEN", ) await client.connectors.create( - name="string", - url="string", - oauth=CreateConnectorOAuth(), - service_auth=CreateConnectorServiceAuth( - type=AuthTokenType.BEARER, - token="string", - ), + name="name", + url="url", ) """ _request: typing.Dict[str, typing.Any] = {"name": name, "url": url} @@ -491,10 +618,27 @@ async def create( _request["service_auth"] = service_auth _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/connectors"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "connectors"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(CreateConnectorResponse, _response.json()) # type: ignore @@ -512,12 +656,14 @@ async def create( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get(self, id: str) -> GetConnectorResponse: + async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> GetConnectorResponse: """ Retrieve a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. Parameters: - id: str. The ID of the connector to retrieve. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import AsyncClient @@ -526,14 +672,26 @@ async def get(self, id: str) -> GetConnectorResponse: token="YOUR_TOKEN", ) await client.connectors.get( - id="string", + id="id", ) """ _response = await self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/connectors/{id}"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"connectors/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(GetConnectorResponse, _response.json()) # type: ignore @@ -551,12 +709,16 @@ async def get(self, id: str) -> GetConnectorResponse: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def delete(self, id: str) -> DeleteConnectorResponse: + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DeleteConnectorResponse: """ Delete a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. Parameters: - id: str. The ID of the connector to delete. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere.client import AsyncClient @@ -565,14 +727,26 @@ async def delete(self, id: str) -> DeleteConnectorResponse: token="YOUR_TOKEN", ) await client.connectors.delete( - id="string", + id="id", ) """ _response = await self._client_wrapper.httpx_client.request( "DELETE", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/connectors/{id}"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"connectors/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DeleteConnectorResponse, _response.json()) # type: ignore @@ -603,6 +777,7 @@ async def update( active: typing.Optional[bool] = OMIT, continue_on_failure: typing.Optional[bool] = OMIT, service_auth: typing.Optional[CreateConnectorServiceAuth] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> UpdateConnectorResponse: """ Update a connector by ID. Omitted fields will not be updated. See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. @@ -623,12 +798,9 @@ async def update( - continue_on_failure: typing.Optional[bool]. - service_auth: typing.Optional[CreateConnectorServiceAuth]. 
The service to service authentication configuration for the connector. Cannot be specified if oauth is specified. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere import ( - AuthTokenType, - CreateConnectorOAuth, - CreateConnectorServiceAuth, - ) from cohere.client import AsyncClient client = AsyncClient( @@ -636,12 +808,7 @@ async def update( token="YOUR_TOKEN", ) await client.connectors.update( - id="string", - oauth=CreateConnectorOAuth(), - service_auth=CreateConnectorServiceAuth( - type=AuthTokenType.BEARER, - token="string", - ), + id="id", ) """ _request: typing.Dict[str, typing.Any] = {} @@ -661,10 +828,27 @@ async def update( _request["service_auth"] = service_auth _response = await self._client_wrapper.httpx_client.request( "PATCH", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/connectors/{id}"), - json=jsonable_encoder(_request), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"connectors/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + json=jsonable_encoder(_request) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(_request), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(UpdateConnectorResponse, _response.json()) # type: ignore @@ -685,7 +869,11 @@ async def update( raise ApiError(status_code=_response.status_code, body=_response_json) async def o_auth_authorize( - self, id: str, *, after_token_redirect: typing.Optional[str] = None + self, + id: str, + *, + after_token_redirect: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, ) -> OAuthAuthorizeResponse: """ Authorize the connector with the given ID for the connector oauth app. See ['Connector Authentication'](https://docs.cohere.com/docs/connector-authentication) for more information. @@ -694,6 +882,8 @@ async def o_auth_authorize( - id: str. The ID of the connector to authorize. - after_token_redirect: typing.Optional[str]. The URL to redirect to after the connector has been authorized. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import AsyncClient @@ -702,15 +892,38 @@ async def o_auth_authorize( token="YOUR_TOKEN", ) await client.connectors.o_auth_authorize( - id="string", + id="id", ) """ _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/connectors/{id}/oauth/authorize"), - params=remove_none_from_dict({"after_token_redirect": after_token_redirect}), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"connectors/{id}/oauth/authorize"), + params=jsonable_encoder( + remove_none_from_dict( + { + "after_token_redirect": after_token_redirect, + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + } + ) + ), + json=jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {}))) + if request_options is not None + else None, + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(OAuthAuthorizeResponse, _response.json()) # type: ignore diff --git a/src/cohere/resources/datasets/client.py b/src/cohere/resources/datasets/client.py index 39adef645..b04d55924 100644 --- a/src/cohere/resources/datasets/client.py +++ b/src/cohere/resources/datasets/client.py @@ -10,6 +10,7 @@ from ...core.datetime_utils import serialize_datetime from ...core.jsonable_encoder import jsonable_encoder from ...core.remove_none_from_dict import remove_none_from_dict +from ...core.request_options import RequestOptions from ...errors.too_many_requests_error import TooManyRequestsError from ...types.dataset_type import DatasetType from .types.datasets_create_response import DatasetsCreateResponse @@ -38,6 +39,7 @@ def list( after: typing.Optional[dt.datetime] = None, limit: typing.Optional[str] = None, offset: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, ) -> DatasetsListResponse: """ List datasets that have been created. @@ -52,6 +54,8 @@ def list( - limit: typing.Optional[str]. optional limit to number of results - offset: typing.Optional[str]. optional offset to start of results + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import Client @@ -63,18 +67,34 @@ def list( """ _response = self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/datasets"), - params=remove_none_from_dict( - { - "datasetType": dataset_type, - "before": serialize_datetime(before) if before is not None else None, - "after": serialize_datetime(after) if after is not None else None, - "limit": limit, - "offset": offset, - } + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "datasets"), + params=jsonable_encoder( + remove_none_from_dict( + { + "datasetType": dataset_type, + "before": serialize_datetime(before) if before is not None else None, + "after": serialize_datetime(after) if after is not None else None, + "limit": limit, + "offset": offset, + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + } + ) + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) ), - headers=self._client_wrapper.get_headers(), - timeout=60, + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DatasetsListResponse, _response.json()) # type: ignore @@ -97,8 +117,9 @@ def create( optional_fields: typing.Optional[typing.Union[str, typing.List[str]]] = None, text_separator: typing.Optional[str] = None, csv_delimiter: typing.Optional[str] = None, - data: typing.IO, - eval_data: typing.IO, + data: typing.Optional[typing.IO] = None, + eval_data: typing.Optional[typing.IO] = None, + request_options: typing.Optional[RequestOptions] = None, ) -> DatasetsCreateResponse: """ Create a dataset by uploading a file. See ['Dataset Creation'](https://docs.cohere.com/docs/datasets#dataset-creation) for more information. @@ -120,29 +141,52 @@ def create( - csv_delimiter: typing.Optional[str]. The delimiter used for .csv uploads. - - data: typing.IO. + - data: typing.Optional[typing.IO]. - - eval_data: typing.IO. + - eval_data: typing.Optional[typing.IO]. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
""" _response = self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/datasets"), - params=remove_none_from_dict( - { - "name": name, - "type": type, - "keep_original_file": keep_original_file, - "skip_malformed_input": skip_malformed_input, - "keep_fields": keep_fields, - "optional_fields": optional_fields, - "text_separator": text_separator, - "csv_delimiter": csv_delimiter, - } + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "datasets"), + params=jsonable_encoder( + remove_none_from_dict( + { + "name": name, + "type": type, + "keep_original_file": keep_original_file, + "skip_malformed_input": skip_malformed_input, + "keep_fields": keep_fields, + "optional_fields": optional_fields, + "text_separator": text_separator, + "csv_delimiter": csv_delimiter, + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + } + ) + ), + data=jsonable_encoder(remove_none_from_dict({})) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(remove_none_from_dict({})), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + files=remove_none_from_dict({"data": data, "eval_data": eval_data}), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) ), - data=jsonable_encoder({}), - files={"data": data, "eval_data": eval_data}, - headers=self._client_wrapper.get_headers(), - timeout=60, + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DatasetsCreateResponse, _response.json()) # type: ignore @@ -154,10 +198,12 @@ def create( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_usage(self) -> DatasetsGetUsageResponse: + def get_usage(self, *, request_options: typing.Optional[RequestOptions] = None) -> DatasetsGetUsageResponse: """ View the dataset storage usage for your Organization. Each Organization can have up to 10GB of storage across all their users. + Parameters: + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import Client @@ -169,9 +215,21 @@ def get_usage(self) -> DatasetsGetUsageResponse: """ _response = self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/datasets/usage"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "datasets/usage"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DatasetsGetUsageResponse, _response.json()) # type: ignore @@ -183,12 +241,14 @@ def get_usage(self) -> DatasetsGetUsageResponse: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get(self, id: str) -> DatasetsGetResponse: + def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> DatasetsGetResponse: """ Retrieve a dataset by ID. See ['Datasets'](https://docs.cohere.com/docs/datasets) for more information. Parameters: - id: str. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere.client import Client @@ -197,14 +257,26 @@ def get(self, id: str) -> DatasetsGetResponse: token="YOUR_TOKEN", ) client.datasets.get( - id="string", + id="id", ) """ _response = self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/datasets/{id}"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"datasets/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DatasetsGetResponse, _response.json()) # type: ignore @@ -216,12 +288,16 @@ def get(self, id: str) -> DatasetsGetResponse: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def delete(self, id: str) -> typing.Dict[str, typing.Any]: + def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Dict[str, typing.Any]: """ Delete a dataset by ID. Datasets are automatically deleted after 30 days, but they can also be deleted manually. Parameters: - id: str. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import Client @@ -230,14 +306,26 @@ def delete(self, id: str) -> typing.Dict[str, typing.Any]: token="YOUR_TOKEN", ) client.datasets.delete( - id="string", + id="id", ) """ _response = self._client_wrapper.httpx_client.request( "DELETE", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/datasets/{id}"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"datasets/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(typing.Dict[str, typing.Any], _response.json()) # type: ignore @@ -262,6 +350,7 @@ async def list( after: typing.Optional[dt.datetime] = None, limit: typing.Optional[str] = None, offset: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, ) -> DatasetsListResponse: """ List datasets that have been created. @@ -276,6 +365,8 @@ async def list( - limit: typing.Optional[str]. optional limit to number of results - offset: typing.Optional[str]. optional offset to start of results + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere.client import AsyncClient @@ -287,18 +378,34 @@ async def list( """ _response = await self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/datasets"), - params=remove_none_from_dict( - { - "datasetType": dataset_type, - "before": serialize_datetime(before) if before is not None else None, - "after": serialize_datetime(after) if after is not None else None, - "limit": limit, - "offset": offset, - } + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "datasets"), + params=jsonable_encoder( + remove_none_from_dict( + { + "datasetType": dataset_type, + "before": serialize_datetime(before) if before is not None else None, + "after": serialize_datetime(after) if after is not None else None, + "limit": limit, + "offset": offset, + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + } + ) + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) ), - headers=self._client_wrapper.get_headers(), - timeout=60, + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DatasetsListResponse, _response.json()) # type: ignore @@ -321,8 +428,9 @@ async def create( optional_fields: typing.Optional[typing.Union[str, typing.List[str]]] = None, text_separator: typing.Optional[str] = None, csv_delimiter: typing.Optional[str] = None, - data: typing.IO, - eval_data: typing.IO, + data: typing.Optional[typing.IO] = None, + eval_data: typing.Optional[typing.IO] = None, + request_options: typing.Optional[RequestOptions] = None, ) -> 
DatasetsCreateResponse: """ Create a dataset by uploading a file. See ['Dataset Creation'](https://docs.cohere.com/docs/datasets#dataset-creation) for more information. @@ -344,29 +452,52 @@ async def create( - csv_delimiter: typing.Optional[str]. The delimiter used for .csv uploads. - - data: typing.IO. + - data: typing.Optional[typing.IO]. - - eval_data: typing.IO. + - eval_data: typing.Optional[typing.IO]. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. """ _response = await self._client_wrapper.httpx_client.request( "POST", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/datasets"), - params=remove_none_from_dict( - { - "name": name, - "type": type, - "keep_original_file": keep_original_file, - "skip_malformed_input": skip_malformed_input, - "keep_fields": keep_fields, - "optional_fields": optional_fields, - "text_separator": text_separator, - "csv_delimiter": csv_delimiter, - } + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "datasets"), + params=jsonable_encoder( + remove_none_from_dict( + { + "name": name, + "type": type, + "keep_original_file": keep_original_file, + "skip_malformed_input": skip_malformed_input, + "keep_fields": keep_fields, + "optional_fields": optional_fields, + "text_separator": text_separator, + "csv_delimiter": csv_delimiter, + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + } + ) + ), + data=jsonable_encoder(remove_none_from_dict({})) + if request_options is None or request_options.get("additional_body_parameters") is None + else { + **jsonable_encoder(remove_none_from_dict({})), + **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))), + }, + files=remove_none_from_dict({"data": data, "eval_data": eval_data}), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) ), - data=jsonable_encoder({}), - files={"data": data, "eval_data": eval_data}, - headers=self._client_wrapper.get_headers(), - timeout=60, + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DatasetsCreateResponse, _response.json()) # type: ignore @@ -378,10 +509,12 @@ async def create( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_usage(self) -> DatasetsGetUsageResponse: + async def get_usage(self, *, request_options: typing.Optional[RequestOptions] = None) -> DatasetsGetUsageResponse: """ View the dataset storage usage for your Organization. Each Organization can have up to 10GB of storage across all their users. + Parameters: + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import AsyncClient @@ -393,9 +526,21 @@ async def get_usage(self) -> DatasetsGetUsageResponse: """ _response = await self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/datasets/usage"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "datasets/usage"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DatasetsGetUsageResponse, _response.json()) # type: ignore @@ -407,12 +552,14 @@ async def get_usage(self) -> DatasetsGetUsageResponse: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get(self, id: str) -> DatasetsGetResponse: + async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> DatasetsGetResponse: """ Retrieve a dataset by ID. See ['Datasets'](https://docs.cohere.com/docs/datasets) for more information. Parameters: - id: str. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere.client import AsyncClient @@ -421,14 +568,26 @@ async def get(self, id: str) -> DatasetsGetResponse: token="YOUR_TOKEN", ) await client.datasets.get( - id="string", + id="id", ) """ _response = await self._client_wrapper.httpx_client.request( "GET", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/datasets/{id}"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"datasets/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(DatasetsGetResponse, _response.json()) # type: ignore @@ -440,12 +599,16 @@ async def get(self, id: str) -> DatasetsGetResponse: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def delete(self, id: str) -> typing.Dict[str, typing.Any]: + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Dict[str, typing.Any]: """ Delete a dataset by ID. Datasets are automatically deleted after 30 days, but they can also be deleted manually. Parameters: - id: str. + + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- from cohere.client import AsyncClient @@ -454,14 +617,26 @@ async def delete(self, id: str) -> typing.Dict[str, typing.Any]: token="YOUR_TOKEN", ) await client.datasets.delete( - id="string", + id="id", ) """ _response = await self._client_wrapper.httpx_client.request( "DELETE", - urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/datasets/{id}"), - headers=self._client_wrapper.get_headers(), - timeout=60, + urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"datasets/{id}"), + params=jsonable_encoder( + request_options.get("additional_query_parameters") if request_options is not None else None + ), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self._client_wrapper.get_headers(), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + timeout=request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else 60, ) if 200 <= _response.status_code < 300: return pydantic.parse_obj_as(typing.Dict[str, typing.Any], _response.json()) # type: ignore diff --git a/src/cohere/resources/embed_jobs/client.py b/src/cohere/resources/embed_jobs/client.py index 9031bb1a1..d34936be0 100644 --- a/src/cohere/resources/embed_jobs/client.py +++ b/src/cohere/resources/embed_jobs/client.py @@ -7,6 +7,8 @@ from ...core.api_error import ApiError from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ...core.jsonable_encoder import jsonable_encoder +from ...core.remove_none_from_dict import remove_none_from_dict +from ...core.request_options import RequestOptions from ...errors.bad_request_error import BadRequestError from ...errors.internal_server_error import InternalServerError from ...errors.not_found_error import NotFoundError @@ -30,10 +32,12 @@ class EmbedJobsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def list(self) -> ListEmbedJobResponse: + def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> ListEmbedJobResponse: """ The list embed job endpoint allows users to view all embed jobs history for that specific user. + Parameters: + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
         ---
         from cohere.client import Client
@@ -45,9 +49,21 @@ def list(self) -> ListEmbedJobResponse:
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/embed-jobs"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "embed-jobs"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
         )
         if 200 <= _response.status_code < 300:
             return pydantic.parse_obj_as(ListEmbedJobResponse, _response.json())  # type: ignore
@@ -71,6 +87,7 @@ def create(
         input_type: EmbedInputType,
         name: typing.Optional[str] = OMIT,
         truncate: typing.Optional[CreateEmbedJobRequestTruncate] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
     ) -> CreateEmbedJobResponse:
         """
         This API launches an async Embed job for a [Dataset](https://docs.cohere.com/docs/datasets) of type `embed-input`. The result of a completed embed job is a new Dataset of type `embed-output`, which contains the original text entries and the corresponding embeddings.
@@ -94,8 +111,10 @@
             - truncate: typing.Optional[CreateEmbedJobRequestTruncate]. One of `START|END` to specify how the API will handle inputs longer than the maximum token length. Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
-        ---
-        from cohere import CreateEmbedJobRequestTruncate, EmbedInputType
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from cohere import EmbedInputType
         from cohere.client import Client

         client = Client(
@@ -103,23 +122,43 @@
             token="YOUR_TOKEN",
         )
         client.embed_jobs.create(
-            model="string",
-            dataset_id="string",
+            model="model",
+            dataset_id="dataset_id",
             input_type=EmbedInputType.SEARCH_DOCUMENT,
-            truncate=CreateEmbedJobRequestTruncate.START,
         )
         """
-        _request: typing.Dict[str, typing.Any] = {"model": model, "dataset_id": dataset_id, "input_type": input_type}
+        _request: typing.Dict[str, typing.Any] = {
+            "model": model,
+            "dataset_id": dataset_id,
+            "input_type": input_type.value,
+        }
         if name is not OMIT:
             _request["name"] = name
         if truncate is not OMIT:
-            _request["truncate"] = truncate
+            _request["truncate"] = truncate.value
         _response = self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/embed-jobs"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "embed-jobs"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
         )
         if 200 <= _response.status_code < 300:
             return pydantic.parse_obj_as(CreateEmbedJobResponse, _response.json())  # type: ignore
@@ -135,12 +174,14 @@ def create(
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get(self, id: str) -> EmbedJob:
+    def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EmbedJob:
         """
         This API retrieves the details about an embed job started by the same user.

         Parameters:
             - id: str. The ID of the embed job to retrieve.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
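
Two details of the `create` body above are easy to miss: enum arguments are now serialized explicitly via `.value`, and any `additional_body_parameters` are unpacked after the encoded request, so on a key collision the extras win. A standalone sketch of that merge; the values are hypothetical, "search_document" is the assumed `.value` of `EmbedInputType.SEARCH_DOCUMENT`, and plain dicts stand in for `jsonable_encoder` output:

    import typing

    # Stand-in for the generated _request dict.
    _request: typing.Dict[str, typing.Any] = {
        "model": "model",
        "dataset_id": "dataset_id",
        "input_type": "search_document",
    }
    request_options = {"additional_body_parameters": {"name": "nightly-embed-run"}}

    # Mirrors the json= expression above: the extras are unpacked last, so a key
    # like "model" in additional_body_parameters would override the encoded value.
    body = {**_request, **request_options["additional_body_parameters"]}
    print(body)
    # {'model': 'model', 'dataset_id': 'dataset_id', 'input_type': 'search_document', 'name': 'nightly-embed-run'}
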
         ---
         from cohere.client import Client
@@ -149,14 +190,26 @@ def get(self, id: str) -> EmbedJob:
             token="YOUR_TOKEN",
         )
         client.embed_jobs.get(
-            id="string",
+            id="id",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/embed-jobs/{id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"embed-jobs/{id}"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
         )
         if 200 <= _response.status_code < 300:
             return pydantic.parse_obj_as(EmbedJob, _response.json())  # type: ignore
@@ -174,12 +227,14 @@ def get(self, id: str) -> EmbedJob:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def cancel(self, id: str) -> None:
+    def cancel(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
         """
         This API allows users to cancel an active embed job. Once invoked, the embedding process will be terminated, and users will be charged for the embeddings processed up to the cancellation point. It's important to note that partial results will not be available to users after cancellation.

         Parameters:
             - id: str. The ID of the embed job to cancel.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
         from cohere.client import Client
@@ -188,14 +243,29 @@ def cancel(self, id: str) -> None:
             token="YOUR_TOKEN",
         )
         client.embed_jobs.cancel(
-            id="string",
+            id="id",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/embed-jobs/{id}/cancel"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"embed-jobs/{id}/cancel"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))
+            if request_options is not None
+            else None,
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
        )
         if 200 <= _response.status_code < 300:
             return
@@ -218,10 +288,12 @@ class AsyncEmbedJobsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper

-    async def list(self) -> ListEmbedJobResponse:
+    async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> ListEmbedJobResponse:
         """
         The list embed job endpoint allows users to view the history of all embed jobs for that specific user.

+        Parameters:
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
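
Because `get` returns the job's current state, callers typically poll it until the job reaches a terminal state before fetching the output dataset. A sketch under the assumption that `EmbedJob` exposes a status field with values along these lines; neither the field name nor the values are pinned down anywhere in this patch:

    import time

    from cohere.client import Client

    client = Client(token="YOUR_TOKEN")  # other constructor arguments omitted

    while True:
        job = client.embed_jobs.get(id="id")
        status = getattr(job, "status", None)  # assumed field, see above
        if status in ("complete", "failed", "cancelled"):
            break
        time.sleep(5)  # modest interval so we don't hammer the endpoint
    print(job)
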
         ---
         from cohere.client import AsyncClient
@@ -233,9 +305,21 @@ async def list(self) -> ListEmbedJobResponse:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/embed-jobs"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "embed-jobs"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
         )
         if 200 <= _response.status_code < 300:
             return pydantic.parse_obj_as(ListEmbedJobResponse, _response.json())  # type: ignore
@@ -259,6 +343,7 @@ async def create(
         input_type: EmbedInputType,
         name: typing.Optional[str] = OMIT,
         truncate: typing.Optional[CreateEmbedJobRequestTruncate] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
     ) -> CreateEmbedJobResponse:
         """
         This API launches an async Embed job for a [Dataset](https://docs.cohere.com/docs/datasets) of type `embed-input`. The result of a completed embed job is a new Dataset of type `embed-output`, which contains the original text entries and the corresponding embeddings.
@@ -282,8 +367,10 @@
             - truncate: typing.Optional[CreateEmbedJobRequestTruncate]. One of `START|END` to specify how the API will handle inputs longer than the maximum token length. Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
-        ---
-        from cohere import CreateEmbedJobRequestTruncate, EmbedInputType
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from cohere import EmbedInputType
         from cohere.client import AsyncClient

         client = AsyncClient(
@@ -291,23 +378,43 @@
             token="YOUR_TOKEN",
         )
         await client.embed_jobs.create(
-            model="string",
-            dataset_id="string",
+            model="model",
+            dataset_id="dataset_id",
             input_type=EmbedInputType.SEARCH_DOCUMENT,
-            truncate=CreateEmbedJobRequestTruncate.START,
         )
         """
-        _request: typing.Dict[str, typing.Any] = {"model": model, "dataset_id": dataset_id, "input_type": input_type}
+        _request: typing.Dict[str, typing.Any] = {
+            "model": model,
+            "dataset_id": dataset_id,
+            "input_type": input_type.value,
+        }
         if name is not OMIT:
             _request["name"] = name
         if truncate is not OMIT:
-            _request["truncate"] = truncate
+            _request["truncate"] = truncate.value
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "v1/embed-jobs"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "embed-jobs"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
         )
         if 200 <= _response.status_code < 300:
             return pydantic.parse_obj_as(CreateEmbedJobResponse, _response.json())  # type: ignore
@@ -323,12 +430,14 @@ async def create(
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get(self, id: str) -> EmbedJob:
+    async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EmbedJob:
         """
         This API retrieves the details about an embed job started by the same user.

         Parameters:
             - id: str. The ID of the embed job to retrieve.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
         from cohere.client import AsyncClient
@@ -337,14 +446,26 @@ async def get(self, id: str) -> EmbedJob:
             token="YOUR_TOKEN",
         )
         await client.embed_jobs.get(
-            id="string",
+            id="id",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/embed-jobs/{id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"embed-jobs/{id}"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
         )
         if 200 <= _response.status_code < 300:
             return pydantic.parse_obj_as(EmbedJob, _response.json())  # type: ignore
@@ -362,12 +483,14 @@ async def get(self, id: str) -> EmbedJob:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def cancel(self, id: str) -> None:
+    async def cancel(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
         """
         This API allows users to cancel an active embed job. Once invoked, the embedding process will be terminated, and users will be charged for the embeddings processed up to the cancellation point. It's important to note that partial results will not be available to users after cancellation.

         Parameters:
             - id: str. The ID of the embed job to cancel.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
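
Given the billing note above (work is charged up to the cancellation point and partial results are discarded), cancelling early is worth wiring into shutdown paths. A minimal sketch; note that with no `request_options`, the new code path sends `json=None`, that is, an empty POST body:

    import asyncio

    from cohere.client import AsyncClient


    async def main() -> None:
        client = AsyncClient(token="YOUR_TOKEN")  # other constructor arguments omitted
        try:
            await asyncio.sleep(30)  # stand-in for whatever work the caller is awaiting
        finally:
            # Returns None on a 2xx response; billing stops at the cancellation point
            # and partially computed embeddings are not retrievable afterwards.
            await client.embed_jobs.cancel(id="id")


    asyncio.run(main())
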
         ---
         from cohere.client import AsyncClient
@@ -376,14 +499,29 @@ async def cancel(self, id: str) -> None:
             token="YOUR_TOKEN",
         )
         await client.embed_jobs.cancel(
-            id="string",
+            id="id",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"v1/embed-jobs/{id}/cancel"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"embed-jobs/{id}/cancel"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))
+            if request_options is not None
+            else None,
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
         )
         if 200 <= _response.status_code < 300:
             return
diff --git a/src/cohere/types/embed_response.py b/src/cohere/types/embed_response.py
index 2bc9f8e46..17e111a06 100644
--- a/src/cohere/types/embed_response.py
+++ b/src/cohere/types/embed_response.py
@@ -4,14 +4,12 @@

 import typing

-import typing_extensions
-
 from .embed_by_type_response import EmbedByTypeResponse
 from .embed_floats_response import EmbedFloatsResponse


 class EmbedResponse_EmbeddingsFloats(EmbedFloatsResponse):
-    response_type: typing_extensions.Literal["embeddings_floats"]
+    response_type: typing.Literal["embeddings_floats"]

     class Config:
         frozen = True
@@ -20,7 +18,7 @@ class Config:


 class EmbedResponse_EmbeddingsByType(EmbedByTypeResponse):
-    response_type: typing_extensions.Literal["embeddings_by_type"]
+    response_type: typing.Literal["embeddings_by_type"]

     class Config:
         frozen = True
diff --git a/src/cohere/types/generate_streamed_response.py b/src/cohere/types/generate_streamed_response.py
index a5b307b7a..53eb80102 100644
--- a/src/cohere/types/generate_streamed_response.py
+++ b/src/cohere/types/generate_streamed_response.py
@@ -4,15 +4,13 @@

 import typing

-import typing_extensions
-
 from .generate_stream_end import GenerateStreamEnd
 from .generate_stream_error import GenerateStreamError
 from .generate_stream_text import GenerateStreamText


 class GenerateStreamedResponse_TextGeneration(GenerateStreamText):
-    event_type: typing_extensions.Literal["text-generation"]
+    event_type: typing.Literal["text-generation"]

     class Config:
         frozen = True
@@ -21,7 +19,7 @@ class Config:


 class GenerateStreamedResponse_StreamEnd(GenerateStreamEnd):
-    event_type: typing_extensions.Literal["stream-end"]
+    event_type: typing.Literal["stream-end"]

     class Config:
         frozen = True
@@ -30,7 +28,7 @@ class Config:


 class GenerateStreamedResponse_StreamError(GenerateStreamError):
-    event_type: typing_extensions.Literal["stream-error"]
+    event_type: typing.Literal["stream-error"]

     class Config:
         frozen = True
diff --git a/src/cohere/types/non_streamed_chat_response.py b/src/cohere/types/non_streamed_chat_response.py
index 7ca3e2c81..54e910cca 100644
--- a/src/cohere/types/non_streamed_chat_response.py
+++ b/src/cohere/types/non_streamed_chat_response.py
@@ -17,7 +17,7 @@ class NonStreamedChatResponse(pydantic.BaseModel):
     text: str = pydantic.Field(description="Contents of the reply generated by the model.")
-    generation_id: str = pydantic.Field(
+    generation_id: typing.Optional[str] = pydantic.Field(
         description="Unique identifier for the generated reply. Useful for submitting feedback."
     )
     citations: typing.Optional[typing.List[ChatCitation]] = pydantic.Field(
@@ -26,6 +26,9 @@ class NonStreamedChatResponse(pydantic.BaseModel):
     documents: typing.Optional[typing.List[ChatDocument]] = pydantic.Field(
         description="Documents seen by the model when generating the reply."
     )
+    is_search_required: typing.Optional[bool] = pydantic.Field(
+        description="Denotes that a search for documents is required during the RAG flow."
+    )
     search_queries: typing.Optional[typing.List[ChatSearchQuery]] = pydantic.Field(
         description="Generated search queries, meant to be used as part of the RAG flow."
     )
diff --git a/src/cohere/types/streamed_chat_response.py b/src/cohere/types/streamed_chat_response.py
index 7f9879b35..3a2d18c2b 100644
--- a/src/cohere/types/streamed_chat_response.py
+++ b/src/cohere/types/streamed_chat_response.py
@@ -4,8 +4,6 @@

 import typing

-import typing_extensions
-
 from .chat_citation_generation_event import ChatCitationGenerationEvent
 from .chat_search_queries_generation_event import ChatSearchQueriesGenerationEvent
 from .chat_search_results_event import ChatSearchResultsEvent
@@ -15,7 +13,7 @@


 class StreamedChatResponse_StreamStart(ChatStreamStartEvent):
-    event_type: typing_extensions.Literal["stream-start"]
+    event_type: typing.Literal["stream-start"]

     class Config:
         frozen = True
@@ -24,7 +22,7 @@ class Config:


 class StreamedChatResponse_SearchQueriesGeneration(ChatSearchQueriesGenerationEvent):
-    event_type: typing_extensions.Literal["search-queries-generation"]
+    event_type: typing.Literal["search-queries-generation"]

     class Config:
         frozen = True
@@ -33,7 +31,7 @@ class Config:


 class StreamedChatResponse_SearchResults(ChatSearchResultsEvent):
-    event_type: typing_extensions.Literal["search-results"]
+    event_type: typing.Literal["search-results"]

     class Config:
         frozen = True
@@ -42,7 +40,7 @@ class Config:


 class StreamedChatResponse_TextGeneration(ChatTextGenerationEvent):
-    event_type: typing_extensions.Literal["text-generation"]
+    event_type: typing.Literal["text-generation"]

     class Config:
         frozen = True
@@ -51,7 +49,7 @@ class Config:


 class StreamedChatResponse_CitationGeneration(ChatCitationGenerationEvent):
-    event_type: typing_extensions.Literal["citation-generation"]
+    event_type: typing.Literal["citation-generation"]

     class Config:
         frozen = True
@@ -60,7 +58,7 @@ class Config:


 class StreamedChatResponse_StreamEnd(ChatStreamEndEvent):
-    event_type: typing_extensions.Literal["stream-end"]
+    event_type: typing.Literal["stream-end"]

     class Config:
         frozen = True
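
The move from `typing_extensions.Literal` to `typing.Literal` across these types assumes Python 3.8 or newer, where `typing.Literal` was introduced. Functionally, each `event_type` Literal is the discriminator pydantic uses to pick the concrete variant while parsing, and callers can branch on either the tag or the class. A handler sketch; the `text` attribute on the text-generation event is an assumption, since the event's fields are not shown in this patch:

    import typing

    from cohere.types.streamed_chat_response import (
        StreamedChatResponse_StreamEnd,
        StreamedChatResponse_TextGeneration,
    )


    def handle_event(event: typing.Any) -> None:
        # Branch on the concrete class; isinstance narrowing keeps type checkers happy.
        if isinstance(event, StreamedChatResponse_TextGeneration):
            print(event.text, end="")  # assumed field on ChatTextGenerationEvent
        elif isinstance(event, StreamedChatResponse_StreamEnd):
            print()  # terminal event: the stream is done
        elif getattr(event, "event_type", None) == "citation-generation":
            pass  # citations arrive here during RAG-style chats
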