diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 078b9e28..9127b1bd 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.8.0"
+ ".": "3.9.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 29f00c95..b4763701 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 188
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-f07d74847e620dfa26d8df40ea4680814af9bba381b3a57a7b6ed76ad49d85f8.yml
-openapi_spec_hash: e3553dc2abf2afd4368b736bcc32a289
-config_hash: b28984dd49d4baf1d68572efe83ac103
+configured_endpoints: 189
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-ca993fd0bc66f703323a773c92da75207266f7f9d8c54ddac2fbd271a3cdaf86.yml
+openapi_spec_hash: 35d7edb04aab2ab28bc7e5851a54b4e3
+config_hash: fad48c8ac796b240fe3b90181586d1a4
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bf60097a..fc39fcc7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,32 @@
# Changelog
+## 3.9.0 (2025-12-17)
+
+Full Changelog: [v3.8.0...v3.9.0](https://github.com/digitalocean/gradient-python/compare/v3.8.0...v3.9.0)
+
+### Features
+
+* **api:** update via SDK Studio ([4173864](https://github.com/digitalocean/gradient-python/commit/4173864db71088fb5a2e3fc8033462580bb66603))
+* **api:** update via SDK Studio ([f6b12b8](https://github.com/digitalocean/gradient-python/commit/f6b12b8a67014dd608d8260c056d1c75342edda6))
+* **api:** update via SDK Studio ([a9cd7d3](https://github.com/digitalocean/gradient-python/commit/a9cd7d3bc6e2e988901e31064a4e607059c7ac09))
+
+
+### Bug Fixes
+
+* ensure streams are always closed ([80881b5](https://github.com/digitalocean/gradient-python/commit/80881b5248ac8baa2b34043df1d20086f319d2d1))
+* **types:** allow pyright to infer TypedDict types within SequenceNotStr ([b400d38](https://github.com/digitalocean/gradient-python/commit/b400d3808dc93924d7d44b25714bb53ef220bfe8))
+* use async_to_httpx_files in patch method ([33d2306](https://github.com/digitalocean/gradient-python/commit/33d2306ee7211b7180ab156697159b9aa02d564e))
+
+
+### Chores
+
+* add missing docstrings ([9ac1364](https://github.com/digitalocean/gradient-python/commit/9ac136400dbd411b3d2177d20b255b0572861c48))
+* add Python 3.14 classifier and testing ([db08b3f](https://github.com/digitalocean/gradient-python/commit/db08b3fb9a7d07ff02a8d45804647ce7c1e34e5a))
+* **deps:** mypy 1.18.1 has a regression, pin to 1.17 ([4710dcd](https://github.com/digitalocean/gradient-python/commit/4710dcdcc4600546a048e2769abeee056d9383f6))
+* **docs:** use environment variables for authentication in code snippets ([47b051a](https://github.com/digitalocean/gradient-python/commit/47b051af6578df97f84464ae40f04f957a00160a))
+* **internal:** add missing files argument to base client ([8ffa56c](https://github.com/digitalocean/gradient-python/commit/8ffa56c38b3816d5598d83976030e1a8706ec45e))
+* update lockfile ([516734f](https://github.com/digitalocean/gradient-python/commit/516734f2d19eb314061fb27c049a878b8c766313))
+
## 3.8.0 (2025-11-20)
Full Changelog: [v3.7.0...v3.8.0](https://github.com/digitalocean/gradient-python/compare/v3.7.0...v3.8.0)
diff --git a/README.md b/README.md
index a3029a83..c69fdd1b 100644
--- a/README.md
+++ b/README.md
@@ -140,6 +140,7 @@ pip install gradient[aiohttp]
Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
```python
+import os
import asyncio
from gradient import DefaultAioHttpClient
from gradient import AsyncGradient
@@ -147,6 +148,9 @@ from gradient import AsyncGradient
async def main() -> None:
async with AsyncGradient(
+ model_access_key=os.environ.get(
+ "GRADIENT_MODEL_ACCESS_KEY"
+ ), # This is the default and can be omitted
http_client=DefaultAioHttpClient(),
) as client:
completion = await client.chat.completions.create(
diff --git a/api.md b/api.md
index e32fae32..caf241a4 100644
--- a/api.md
+++ b/api.md
@@ -1027,3 +1027,15 @@ Methods:
- client.nfs.snapshots.retrieve(nfs_snapshot_id, \*\*params) -> SnapshotRetrieveResponse
- client.nfs.snapshots.list(\*\*params) -> SnapshotListResponse
- client.nfs.snapshots.delete(nfs_snapshot_id, \*\*params) -> None
+
+# Retrieve
+
+Types:
+
+```python
+from gradient.types import RetrieveDocumentsResponse
+```
+
+Methods:
+
+- client.retrieve.documents(knowledge_base_id, \*\*params) -> RetrieveDocumentsResponse
diff --git a/examples/knowledge_base_indexing_wait.py b/examples/knowledge_base_indexing_wait.py
index 1171fea3..94550ee2 100644
--- a/examples/knowledge_base_indexing_wait.py
+++ b/examples/knowledge_base_indexing_wait.py
@@ -43,8 +43,6 @@ def main() -> None:
print("\n✅ Indexing job completed successfully!")
if completed_job.job:
print(f"Phase: {completed_job.job.phase}")
- print(f"Total items indexed: {completed_job.job.total_items_indexed}")
- print(f"Total items failed: {completed_job.job.total_items_failed}")
print(f"Total datasources: {completed_job.job.total_datasources}")
print(f"Completed datasources: {completed_job.job.completed_datasources}")
diff --git a/pyproject.toml b/pyproject.toml
index 28cc09df..9cce1fde 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,10 +1,11 @@
[project]
name = "gradient"
-version = "3.8.0"
+version = "3.9.0"
description = "The official Python library for the Gradient API"
dynamic = ["readme"]
license = "Apache-2.0"
authors = [{ name = "DigitalOcean, LLC", email = "dev@digitalocean.com" }]
+
dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
@@ -13,6 +14,7 @@ dependencies = [
"distro>=1.7.0, <2",
"sniffio",
]
+
requires-python = ">= 3.9"
classifiers = [
"Typing :: Typed",
@@ -22,6 +24,7 @@ classifiers = [
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: MacOS",
@@ -43,7 +46,7 @@ managed = true
# version pins are in requirements-dev.lock
dev-dependencies = [
"pyright==1.1.399",
- "mypy",
+ "mypy==1.17",
"respx",
"pytest",
"pytest-asyncio",
diff --git a/requirements-dev.lock b/requirements-dev.lock
index e5307af8..63b7bd64 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -12,40 +12,45 @@
-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.12.8
+aiohttp==3.13.2
# via gradient
# via httpx-aiohttp
-aiosignal==1.3.2
+aiosignal==1.4.0
# via aiohttp
-annotated-types==0.6.0
+annotated-types==0.7.0
# via pydantic
-anyio==4.4.0
+anyio==4.12.0
# via gradient
# via httpx
-argcomplete==3.1.2
+argcomplete==3.6.3
# via nox
async-timeout==5.0.1
# via aiohttp
-attrs==25.3.0
+attrs==25.4.0
# via aiohttp
-certifi==2023.7.22
+ # via nox
+backports-asyncio-runner==1.2.0
+ # via pytest-asyncio
+certifi==2025.11.12
# via httpcore
# via httpx
-colorlog==6.7.0
+colorlog==6.10.1
+ # via nox
+dependency-groups==1.3.1
# via nox
-dirty-equals==0.6.0
-distlib==0.3.7
+dirty-equals==0.11
+distlib==0.4.0
# via virtualenv
-distro==1.8.0
+distro==1.9.0
# via gradient
-exceptiongroup==1.2.2
+exceptiongroup==1.3.1
# via anyio
# via pytest
-execnet==2.1.1
+execnet==2.1.2
# via pytest-xdist
-filelock==3.12.4
+filelock==3.19.1
# via virtualenv
-frozenlist==1.6.2
+frozenlist==1.8.0
# via aiohttp
# via aiosignal
h11==0.16.0
@@ -58,80 +63,87 @@ httpx==0.28.1
# via respx
httpx-aiohttp==0.1.9
# via gradient
-idna==3.4
+humanize==4.13.0
+ # via nox
+idna==3.11
# via anyio
# via httpx
# via yarl
-importlib-metadata==7.0.0
-iniconfig==2.0.0
+importlib-metadata==8.7.0
+iniconfig==2.1.0
# via pytest
markdown-it-py==3.0.0
# via rich
mdurl==0.1.2
# via markdown-it-py
-multidict==6.4.4
+multidict==6.7.0
# via aiohttp
# via yarl
-mypy==1.14.1
-mypy-extensions==1.0.0
+mypy==1.17.0
+mypy-extensions==1.1.0
# via mypy
-nodeenv==1.8.0
+nodeenv==1.9.1
# via pyright
-nox==2023.4.22
-packaging==23.2
+nox==2025.11.12
+packaging==25.0
+ # via dependency-groups
# via nox
# via pytest
-platformdirs==3.11.0
+pathspec==0.12.1
+ # via mypy
+platformdirs==4.4.0
# via virtualenv
-pluggy==1.5.0
+pluggy==1.6.0
# via pytest
-propcache==0.3.1
+propcache==0.4.1
# via aiohttp
# via yarl
-pydantic==2.11.9
+pydantic==2.12.5
# via gradient
-pydantic-core==2.33.2
+pydantic-core==2.41.5
# via pydantic
-pygments==2.18.0
+pygments==2.19.2
+ # via pytest
# via rich
pyright==1.1.399
-pytest==8.3.3
+pytest==8.4.2
# via pytest-asyncio
# via pytest-xdist
-pytest-asyncio==0.24.0
-pytest-xdist==3.7.0
-python-dateutil==2.8.2
+pytest-asyncio==1.2.0
+pytest-xdist==3.8.0
+python-dateutil==2.9.0.post0
# via time-machine
-pytz==2023.3.post1
- # via dirty-equals
respx==0.22.0
-rich==13.7.1
-ruff==0.9.4
-setuptools==68.2.2
- # via nodeenv
-six==1.16.0
+rich==14.2.0
+ruff==0.14.7
+six==1.17.0
# via python-dateutil
-sniffio==1.3.0
- # via anyio
+sniffio==1.3.1
# via gradient
-time-machine==2.9.0
-tomli==2.0.2
+time-machine==2.19.0
+tomli==2.3.0
+ # via dependency-groups
# via mypy
+ # via nox
# via pytest
-typing-extensions==4.12.2
+typing-extensions==4.15.0
+ # via aiosignal
# via anyio
+ # via exceptiongroup
# via gradient
# via multidict
# via mypy
# via pydantic
# via pydantic-core
# via pyright
+ # via pytest-asyncio
# via typing-inspection
-typing-inspection==0.4.1
+ # via virtualenv
+typing-inspection==0.4.2
# via pydantic
-virtualenv==20.24.5
+virtualenv==20.35.4
# via nox
-yarl==1.20.0
+yarl==1.22.0
# via aiohttp
-zipp==3.17.0
+zipp==3.23.0
# via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index 8c60e6c5..b2623a7b 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -12,28 +12,28 @@
-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.12.8
+aiohttp==3.13.2
# via gradient
# via httpx-aiohttp
-aiosignal==1.3.2
+aiosignal==1.4.0
# via aiohttp
-annotated-types==0.6.0
+annotated-types==0.7.0
# via pydantic
-anyio==4.4.0
+anyio==4.12.0
# via gradient
# via httpx
async-timeout==5.0.1
# via aiohttp
-attrs==25.3.0
+attrs==25.4.0
# via aiohttp
-certifi==2023.7.22
+certifi==2025.11.12
# via httpcore
# via httpx
-distro==1.8.0
+distro==1.9.0
# via gradient
-exceptiongroup==1.2.2
+exceptiongroup==1.3.1
# via anyio
-frozenlist==1.6.2
+frozenlist==1.8.0
# via aiohttp
# via aiosignal
h11==0.16.0
@@ -45,31 +45,32 @@ httpx==0.28.1
# via httpx-aiohttp
httpx-aiohttp==0.1.9
# via gradient
-idna==3.4
+idna==3.11
# via anyio
# via httpx
# via yarl
-multidict==6.4.4
+multidict==6.7.0
# via aiohttp
# via yarl
-propcache==0.3.1
+propcache==0.4.1
# via aiohttp
# via yarl
-pydantic==2.11.9
+pydantic==2.12.5
# via gradient
-pydantic-core==2.33.2
+pydantic-core==2.41.5
# via pydantic
-sniffio==1.3.0
- # via anyio
+sniffio==1.3.1
# via gradient
-typing-extensions==4.12.2
+typing-extensions==4.15.0
+ # via aiosignal
# via anyio
+ # via exceptiongroup
# via gradient
# via multidict
# via pydantic
# via pydantic-core
# via typing-inspection
-typing-inspection==0.4.1
+typing-inspection==0.4.2
# via pydantic
-yarl==1.20.0
+yarl==1.22.0
# via aiohttp
diff --git a/src/gradient/_base_client.py b/src/gradient/_base_client.py
index c6d52884..f038b215 100644
--- a/src/gradient/_base_client.py
+++ b/src/gradient/_base_client.py
@@ -1267,9 +1267,12 @@ def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+ opts = FinalRequestOptions.construct(
+ method="patch", url=path, json_data=body, files=to_httpx_files(files), **options
+ )
return self.request(cast_to, opts)
def put(
@@ -1805,9 +1808,12 @@ async def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+ opts = FinalRequestOptions.construct(
+ method="patch", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ )
return await self.request(cast_to, opts)
async def put(
diff --git a/src/gradient/_client.py b/src/gradient/_client.py
index f7891ddf..47fae3cd 100644
--- a/src/gradient/_client.py
+++ b/src/gradient/_client.py
@@ -39,6 +39,7 @@
images,
models,
regions,
+ retrieve,
databases,
inference,
gpu_droplets,
@@ -47,6 +48,7 @@
from .resources.images import ImagesResource, AsyncImagesResource
from .resources.nfs.nfs import NfsResource, AsyncNfsResource
from .resources.regions import RegionsResource, AsyncRegionsResource
+ from .resources.retrieve import RetrieveResource, AsyncRetrieveResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.gpu_droplets import (
GPUDropletsResource,
@@ -80,6 +82,7 @@ class Gradient(SyncAPIClient):
agent_access_key: str | None
_agent_endpoint: str | None
inference_endpoint: str | None
+ kbass_endpoint: str | None
def __init__(
self,
@@ -89,6 +92,7 @@ def __init__(
agent_access_key: str | None = None,
agent_endpoint: str | None = None,
inference_endpoint: str | None = None,
+ kbass_endpoint: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = not_given,
max_retries: int = DEFAULT_MAX_RETRIES,
@@ -119,6 +123,7 @@ def __init__(
- `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY`
- `agent_endpoint` from `GRADIENT_AGENT_ENDPOINT`
- `inference_endpoint` from `GRADIENT_INFERENCE_ENDPOINT`
+ - `kbass_endpoint` from `GRADIENT_KBASS_ENDPOINT`
"""
if access_token is None:
access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN")
@@ -143,6 +148,10 @@ def __init__(
)
self.inference_endpoint = inference_endpoint
+ if kbass_endpoint is None:
+ kbass_endpoint = os.environ.get("GRADIENT_KBASS_ENDPOINT") or "kbaas.do-ai.run"
+ self.kbass_endpoint = kbass_endpoint
+
if base_url is None:
base_url = os.environ.get("GRADIENT_BASE_URL")
self._base_url_overridden = base_url is not None
@@ -237,6 +246,12 @@ def nfs(self) -> NfsResource:
return NfsResource(self)
+ @cached_property
+ def retrieve(self) -> RetrieveResource:
+ from .resources.retrieve import RetrieveResource
+
+ return RetrieveResource(self)
+
@cached_property
def with_raw_response(self) -> GradientWithRawResponse:
return GradientWithRawResponse(self)
@@ -326,6 +341,7 @@ def copy(
agent_access_key: str | None = None,
agent_endpoint: str | None = None,
inference_endpoint: str | None = None,
+ kbass_endpoint: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.Client | None = None,
@@ -370,6 +386,7 @@ def copy(
agent_access_key=agent_access_key or self.agent_access_key,
agent_endpoint=agent_endpoint or self._agent_endpoint,
inference_endpoint=inference_endpoint or self.inference_endpoint,
+ kbass_endpoint=kbass_endpoint or self.kbass_endpoint,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
@@ -436,6 +453,7 @@ class AsyncGradient(AsyncAPIClient):
agent_access_key: str | None
_agent_endpoint: str | None
inference_endpoint: str | None
+ kbass_endpoint: str | None
def __init__(
self,
@@ -445,6 +463,7 @@ def __init__(
agent_access_key: str | None = None,
agent_endpoint: str | None = None,
inference_endpoint: str | None = None,
+ kbass_endpoint: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = not_given,
max_retries: int = DEFAULT_MAX_RETRIES,
@@ -475,6 +494,7 @@ def __init__(
- `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY`
- `agent_endpoint` from `GRADIENT_AGENT_ENDPOINT`
- `inference_endpoint` from `GRADIENT_INFERENCE_ENDPOINT`
+ - `kbass_endpoint` from `GRADIENT_KBASS_ENDPOINT`
"""
if access_token is None:
access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN")
@@ -499,6 +519,10 @@ def __init__(
)
self.inference_endpoint = inference_endpoint
+ if kbass_endpoint is None:
+ kbass_endpoint = os.environ.get("GRADIENT_KBASS_ENDPOINT") or "kbaas.do-ai.run"
+ self.kbass_endpoint = kbass_endpoint
+
if base_url is None:
base_url = os.environ.get("GRADIENT_BASE_URL")
self._base_url_overridden = base_url is not None
@@ -593,6 +617,12 @@ def nfs(self) -> AsyncNfsResource:
return AsyncNfsResource(self)
+ @cached_property
+ def retrieve(self) -> AsyncRetrieveResource:
+ from .resources.retrieve import AsyncRetrieveResource
+
+ return AsyncRetrieveResource(self)
+
@cached_property
def with_raw_response(self) -> AsyncGradientWithRawResponse:
return AsyncGradientWithRawResponse(self)
@@ -682,6 +712,7 @@ def copy(
model_access_key: str | None = None,
agent_access_key: str | None = None,
inference_endpoint: str | None = None,
+ kbass_endpoint: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.AsyncClient | None = None,
@@ -726,6 +757,7 @@ def copy(
agent_access_key=agent_access_key or self.agent_access_key,
agent_endpoint=agent_endpoint or self._agent_endpoint,
inference_endpoint=inference_endpoint or self.inference_endpoint,
+ kbass_endpoint=kbass_endpoint or self.kbass_endpoint,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
@@ -851,6 +883,12 @@ def nfs(self) -> nfs.NfsResourceWithRawResponse:
return NfsResourceWithRawResponse(self._client.nfs)
+ @cached_property
+ def retrieve(self) -> retrieve.RetrieveResourceWithRawResponse:
+ from .resources.retrieve import RetrieveResourceWithRawResponse
+
+ return RetrieveResourceWithRawResponse(self._client.retrieve)
+
class AsyncGradientWithRawResponse:
_client: AsyncGradient
@@ -922,6 +960,12 @@ def nfs(self) -> nfs.AsyncNfsResourceWithRawResponse:
return AsyncNfsResourceWithRawResponse(self._client.nfs)
+ @cached_property
+ def retrieve(self) -> retrieve.AsyncRetrieveResourceWithRawResponse:
+ from .resources.retrieve import AsyncRetrieveResourceWithRawResponse
+
+ return AsyncRetrieveResourceWithRawResponse(self._client.retrieve)
+
class GradientWithStreamedResponse:
_client: Gradient
@@ -993,6 +1037,12 @@ def nfs(self) -> nfs.NfsResourceWithStreamingResponse:
return NfsResourceWithStreamingResponse(self._client.nfs)
+ @cached_property
+ def retrieve(self) -> retrieve.RetrieveResourceWithStreamingResponse:
+ from .resources.retrieve import RetrieveResourceWithStreamingResponse
+
+ return RetrieveResourceWithStreamingResponse(self._client.retrieve)
+
class AsyncGradientWithStreamedResponse:
_client: AsyncGradient
@@ -1070,6 +1120,12 @@ def nfs(self) -> nfs.AsyncNfsResourceWithStreamingResponse:
return AsyncNfsResourceWithStreamingResponse(self._client.nfs)
+ @cached_property
+ def retrieve(self) -> retrieve.AsyncRetrieveResourceWithStreamingResponse:
+ from .resources.retrieve import AsyncRetrieveResourceWithStreamingResponse
+
+ return AsyncRetrieveResourceWithStreamingResponse(self._client.retrieve)
+
Client = Gradient
diff --git a/src/gradient/_streaming.py b/src/gradient/_streaming.py
index df2a5870..f0516264 100644
--- a/src/gradient/_streaming.py
+++ b/src/gradient/_streaming.py
@@ -55,29 +55,30 @@ def __stream__(self) -> Iterator[_T]:
process_data = self._client._process_response_data
iterator = self._iter_events()
- for sse in iterator:
- if sse.data.startswith("[DONE]"):
- break
-
- data = sse.json()
- if is_mapping(data) and data.get("error"):
- message = None
- error = data.get("error")
- if is_mapping(error):
- message = error.get("message")
- if not message or not isinstance(message, str):
- message = "An error occurred during streaming"
-
- raise APIError(
- message=message,
- request=self.response.request,
- body=data["error"],
- )
-
- yield process_data(data=data, cast_to=cast_to, response=response)
-
- # As we might not fully consume the response stream, we need to close it explicitly
- response.close()
+ try:
+ for sse in iterator:
+ if sse.data.startswith("[DONE]"):
+ break
+
+ data = sse.json()
+ if is_mapping(data) and data.get("error"):
+ message = None
+ error = data.get("error")
+ if is_mapping(error):
+ message = error.get("message")
+ if not message or not isinstance(message, str):
+ message = "An error occurred during streaming"
+
+ raise APIError(
+ message=message,
+ request=self.response.request,
+ body=data["error"],
+ )
+
+ yield process_data(data=data, cast_to=cast_to, response=response)
+ finally:
+ # Ensure the response is closed even if the consumer doesn't read all data
+ response.close()
def __enter__(self) -> Self:
return self
@@ -136,29 +137,30 @@ async def __stream__(self) -> AsyncIterator[_T]:
process_data = self._client._process_response_data
iterator = self._iter_events()
- async for sse in iterator:
- if sse.data.startswith("[DONE]"):
- break
-
- data = sse.json()
- if is_mapping(data) and data.get("error"):
- message = None
- error = data.get("error")
- if is_mapping(error):
- message = error.get("message")
- if not message or not isinstance(message, str):
- message = "An error occurred during streaming"
-
- raise APIError(
- message=message,
- request=self.response.request,
- body=data["error"],
- )
-
- yield process_data(data=data, cast_to=cast_to, response=response)
-
- # As we might not fully consume the response stream, we need to close it explicitly
- await response.aclose()
+ try:
+ async for sse in iterator:
+ if sse.data.startswith("[DONE]"):
+ break
+
+ data = sse.json()
+ if is_mapping(data) and data.get("error"):
+ message = None
+ error = data.get("error")
+ if is_mapping(error):
+ message = error.get("message")
+ if not message or not isinstance(message, str):
+ message = "An error occurred during streaming"
+
+ raise APIError(
+ message=message,
+ request=self.response.request,
+ body=data["error"],
+ )
+
+ yield process_data(data=data, cast_to=cast_to, response=response)
+ finally:
+ # Ensure the response is closed even if the consumer doesn't read all data
+ await response.aclose()
async def __aenter__(self) -> Self:
return self
diff --git a/src/gradient/_types.py b/src/gradient/_types.py
index 11a40997..65831fee 100644
--- a/src/gradient/_types.py
+++ b/src/gradient/_types.py
@@ -243,6 +243,9 @@ class HttpxSendArgs(TypedDict, total=False):
if TYPE_CHECKING:
# This works because str.__contains__ does not accept object (either in typeshed or at runtime)
# https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
+ #
+ # Note: index() and count() methods are intentionally omitted to allow pyright to properly
+ # infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr.
class SequenceNotStr(Protocol[_T_co]):
@overload
def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
@@ -251,8 +254,6 @@ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
def __contains__(self, value: object, /) -> bool: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_T_co]: ...
- def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
- def count(self, value: Any, /) -> int: ...
def __reversed__(self) -> Iterator[_T_co]: ...
else:
# just point this to a normal `Sequence` at runtime to avoid having to special case
diff --git a/src/gradient/_version.py b/src/gradient/_version.py
index defad636..abe0382c 100644
--- a/src/gradient/_version.py
+++ b/src/gradient/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "gradient"
-__version__ = "3.8.0" # x-release-please-version
+__version__ = "3.9.0" # x-release-please-version
diff --git a/src/gradient/resources/__init__.py b/src/gradient/resources/__init__.py
index a797b18f..f668bb06 100644
--- a/src/gradient/resources/__init__.py
+++ b/src/gradient/resources/__init__.py
@@ -48,6 +48,14 @@
RegionsResourceWithStreamingResponse,
AsyncRegionsResourceWithStreamingResponse,
)
+from .retrieve import (
+ RetrieveResource,
+ AsyncRetrieveResource,
+ RetrieveResourceWithRawResponse,
+ AsyncRetrieveResourceWithRawResponse,
+ RetrieveResourceWithStreamingResponse,
+ AsyncRetrieveResourceWithStreamingResponse,
+)
from .databases import (
DatabasesResource,
AsyncDatabasesResource,
@@ -142,4 +150,10 @@
"AsyncNfsResourceWithRawResponse",
"NfsResourceWithStreamingResponse",
"AsyncNfsResourceWithStreamingResponse",
+ "RetrieveResource",
+ "AsyncRetrieveResource",
+ "RetrieveResourceWithRawResponse",
+ "AsyncRetrieveResourceWithRawResponse",
+ "RetrieveResourceWithStreamingResponse",
+ "AsyncRetrieveResourceWithStreamingResponse",
]
diff --git a/src/gradient/resources/agents/chat/completions.py b/src/gradient/resources/agents/chat/completions.py
index 374fdc16..67a57557 100644
--- a/src/gradient/resources/agents/chat/completions.py
+++ b/src/gradient/resources/agents/chat/completions.py
@@ -472,9 +472,7 @@ def create(
headers = {"Authorization": f"Bearer {self._client.agent_access_key}", **headers}
return self._post(
- "/chat/completions?agent=true"
- if self._client._base_url_overridden
- else f"{self._client.agent_endpoint}/api/v1/chat/completions?agent=true",
+ "/chat/completions?agent=true",
body=maybe_transform(
{
"messages": messages,
@@ -958,9 +956,7 @@ async def create(
headers = {"Authorization": f"Bearer {self._client.agent_access_key}", **headers}
return await self._post(
- "/chat/completions?agent=true"
- if self._client._base_url_overridden
- else f"{self._client.agent_endpoint}/api/v1/chat/completions?agent=true",
+ "/chat/completions?agent=true",
body=await async_maybe_transform(
{
"messages": messages,
diff --git a/src/gradient/resources/chat/completions.py b/src/gradient/resources/chat/completions.py
index 779fffb0..8d0c43c5 100644
--- a/src/gradient/resources/chat/completions.py
+++ b/src/gradient/resources/chat/completions.py
@@ -473,9 +473,7 @@ def create(
}
return self._post(
- "/chat/completions"
- if self._client._base_url_overridden
- else f"{self._client.inference_endpoint}/v1/chat/completions",
+ "/chat/completions",
body=maybe_transform(
{
"messages": messages,
@@ -963,9 +961,7 @@ async def create(
}
return await self._post(
- "/chat/completions"
- if self._client._base_url_overridden
- else f"{self._client.inference_endpoint}/v1/chat/completions",
+ "/chat/completions",
body=await async_maybe_transform(
{
"messages": messages,
diff --git a/src/gradient/resources/images.py b/src/gradient/resources/images.py
index 14bbfeaa..ce7a7b48 100644
--- a/src/gradient/resources/images.py
+++ b/src/gradient/resources/images.py
@@ -317,9 +317,7 @@ def generate(
}
return self._post(
- "/images/generations"
- if self._client._base_url_overridden
- else f"{self._client.inference_endpoint}/v1/images/generations",
+ "/images/generations",
body=maybe_transform(
{
"prompt": prompt,
@@ -638,9 +636,7 @@ async def generate(
**headers,
}
return await self._post(
- "/images/generations"
- if self._client._base_url_overridden
- else f"{self._client.inference_endpoint}/v1/images/generations",
+ "/images/generations",
body=await async_maybe_transform(
{
"prompt": prompt,
diff --git a/src/gradient/resources/knowledge_bases/indexing_jobs.py b/src/gradient/resources/knowledge_bases/indexing_jobs.py
index 7936c73c..2759c3fa 100644
--- a/src/gradient/resources/knowledge_bases/indexing_jobs.py
+++ b/src/gradient/resources/knowledge_bases/indexing_jobs.py
@@ -367,9 +367,7 @@ def wait_for_completion(
# Failure states
if phase == "BATCH_JOB_PHASE_FAILED":
raise IndexingJobError(
- f"Indexing job {uuid} failed. "
- f"Total items indexed: {response.job.total_items_indexed}, "
- f"Total items failed: {response.job.total_items_failed}",
+ f"Indexing job {uuid} failed. ",
uuid=uuid,
phase=phase,
)
@@ -737,9 +735,7 @@ async def wait_for_completion(
# Failure states
if phase == "BATCH_JOB_PHASE_FAILED":
raise IndexingJobError(
- f"Indexing job {uuid} failed. "
- f"Total items indexed: {response.job.total_items_indexed}, "
- f"Total items failed: {response.job.total_items_failed}",
+ f"Indexing job {uuid} failed. ",
uuid=uuid,
phase=phase,
)
diff --git a/src/gradient/resources/nfs/nfs.py b/src/gradient/resources/nfs/nfs.py
index 1510bb69..a46df265 100644
--- a/src/gradient/resources/nfs/nfs.py
+++ b/src/gradient/resources/nfs/nfs.py
@@ -259,10 +259,12 @@ def initiate_action(
request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
`type` attribute to on of the supported action types:
- | Action | Details |
- | ----------------------- | -------------------------------------------------------------------------- |
- | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
- | `snapshot` | Takes a snapshot of an NFS share |
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
Args:
region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
@@ -299,10 +301,96 @@ def initiate_action(
request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
`type` attribute to on of the supported action types:
- | Action | Details |
- | ----------------------- | -------------------------------------------------------------------------- |
- | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
- | `snapshot` | Takes a snapshot of an NFS share |
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+
+ Args:
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionAttachParams | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+ `type` attribute to on of the supported action types:
+
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+
+ Args:
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionDetachParams | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+    `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
Args:
region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
@@ -328,6 +416,8 @@ def initiate_action(
type: Literal["resize", "snapshot", "attach", "detach"],
params: nf_initiate_action_params.NfsActionResizeParams
| nf_initiate_action_params.NfsActionSnapshotParams
+ | nf_initiate_action_params.NfsActionAttachParams
+ | nf_initiate_action_params.NfsActionDetachParams
| Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -582,10 +672,12 @@ async def initiate_action(
request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
`type` attribute to one of the supported action types:
- | Action | Details |
- | ----------------------- | -------------------------------------------------------------------------- |
- | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
- | `snapshot` | Takes a snapshot of an NFS share |
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
Args:
region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
@@ -622,10 +714,96 @@ async def initiate_action(
request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
`type` attribute to one of the supported action types:
- | Action | Details |
- | ----------------------- | -------------------------------------------------------------------------- |
- | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
- | `snapshot` | Takes a snapshot of an NFS share |
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+
+ Args:
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot", "attach", "detach"],
+ params: nf_initiate_action_params.NfsActionAttachParams | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+
+ Args:
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot", "attach", "detach"],
+ params: nf_initiate_action_params.NfsActionDetachParams | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
Args:
region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
@@ -651,6 +829,8 @@ async def initiate_action(
type: Literal["resize", "snapshot", "attach", "detach"],
params: nf_initiate_action_params.NfsActionResizeParams
| nf_initiate_action_params.NfsActionSnapshotParams
+ | nf_initiate_action_params.NfsActionAttachParams
+ | nf_initiate_action_params.NfsActionDetachParams
| Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
diff --git a/src/gradient/resources/retrieve.py b/src/gradient/resources/retrieve.py
new file mode 100644
index 00000000..f0768350
--- /dev/null
+++ b/src/gradient/resources/retrieve.py
@@ -0,0 +1,249 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..types import retrieve_documents_params
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.retrieve_documents_response import RetrieveDocumentsResponse
+
+__all__ = ["RetrieveResource", "AsyncRetrieveResource"]
+
+
+class RetrieveResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RetrieveResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return RetrieveResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RetrieveResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return RetrieveResourceWithStreamingResponse(self)
+
+ def documents(
+ self,
+ knowledge_base_id: str,
+ *,
+ num_results: int,
+ query: str,
+ alpha: float | Omit = omit,
+ filters: retrieve_documents_params.Filters | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RetrieveDocumentsResponse:
+ """
+ Retrieve relevant documents from a knowledge base using semantic search.
+
+ This endpoint:
+
+ 1. Authenticates the request using the provided bearer token
+ 2. Generates embeddings for the query using the knowledge base's configured
+ model
+ 3. Performs vector similarity search in the knowledge base
+ 4. Returns the most relevant document chunks
+
+ The search supports hybrid search combining:
+
+ - Vector similarity (semantic search)
+ - Keyword matching (BM25)
+ - Custom metadata filters
+
+ Args:
+ num_results: Number of results to return
+
+ query: The search query text
+
+ alpha:
+ Weight for hybrid search (0-1):
+
+ - 0 = pure keyword search (BM25)
+ - 1 = pure vector search (default)
+ - 0.5 = balanced hybrid search
+
+ filters: Metadata filters to apply to the search
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_id:
+ raise ValueError(f"Expected a non-empty value for `knowledge_base_id` but received {knowledge_base_id!r}")
+ return self._post(
+ f"/{knowledge_base_id}/retrieve"
+ if self._client._base_url_overridden
+ else f"https://kbaas.do-ai.run/v1/{knowledge_base_id}/retrieve",
+ body=maybe_transform(
+ {
+ "num_results": num_results,
+ "query": query,
+ "alpha": alpha,
+ "filters": filters,
+ },
+ retrieve_documents_params.RetrieveDocumentsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RetrieveDocumentsResponse,
+ )
+
+
+class AsyncRetrieveResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRetrieveResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRetrieveResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRetrieveResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncRetrieveResourceWithStreamingResponse(self)
+
+ async def documents(
+ self,
+ knowledge_base_id: str,
+ *,
+ num_results: int,
+ query: str,
+ alpha: float | Omit = omit,
+ filters: retrieve_documents_params.Filters | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RetrieveDocumentsResponse:
+ """
+ Retrieve relevant documents from a knowledge base using semantic search.
+
+ This endpoint:
+
+ 1. Authenticates the request using the provided bearer token
+ 2. Generates embeddings for the query using the knowledge base's configured
+ model
+ 3. Performs vector similarity search in the knowledge base
+ 4. Returns the most relevant document chunks
+
+ The search supports hybrid search combining:
+
+ - Vector similarity (semantic search)
+ - Keyword matching (BM25)
+ - Custom metadata filters
+
+ Args:
+ num_results: Number of results to return
+
+ query: The search query text
+
+ alpha:
+ Weight for hybrid search (0-1):
+
+ - 0 = pure keyword search (BM25)
+ - 1 = pure vector search (default)
+ - 0.5 = balanced hybrid search
+
+ filters: Metadata filters to apply to the search
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_id:
+ raise ValueError(f"Expected a non-empty value for `knowledge_base_id` but received {knowledge_base_id!r}")
+ return await self._post(
+ f"/{knowledge_base_id}/retrieve"
+ if self._client._base_url_overridden
+ else f"https://kbaas.do-ai.run/v1/{knowledge_base_id}/retrieve",
+ body=await async_maybe_transform(
+ {
+ "num_results": num_results,
+ "query": query,
+ "alpha": alpha,
+ "filters": filters,
+ },
+ retrieve_documents_params.RetrieveDocumentsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RetrieveDocumentsResponse,
+ )
+
+
+class RetrieveResourceWithRawResponse:
+ def __init__(self, retrieve: RetrieveResource) -> None:
+ self._retrieve = retrieve
+
+ self.documents = to_raw_response_wrapper(
+ retrieve.documents,
+ )
+
+
+class AsyncRetrieveResourceWithRawResponse:
+ def __init__(self, retrieve: AsyncRetrieveResource) -> None:
+ self._retrieve = retrieve
+
+ self.documents = async_to_raw_response_wrapper(
+ retrieve.documents,
+ )
+
+
+class RetrieveResourceWithStreamingResponse:
+ def __init__(self, retrieve: RetrieveResource) -> None:
+ self._retrieve = retrieve
+
+ self.documents = to_streamed_response_wrapper(
+ retrieve.documents,
+ )
+
+
+class AsyncRetrieveResourceWithStreamingResponse:
+ def __init__(self, retrieve: AsyncRetrieveResource) -> None:
+ self._retrieve = retrieve
+
+ self.documents = async_to_streamed_response_wrapper(
+ retrieve.documents,
+ )
diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py
index 58d45641..e927e099 100644
--- a/src/gradient/types/__init__.py
+++ b/src/gradient/types/__init__.py
@@ -85,6 +85,7 @@
from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams
from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse
from .nf_initiate_action_params import NfInitiateActionParams as NfInitiateActionParams
+from .retrieve_documents_params import RetrieveDocumentsParams as RetrieveDocumentsParams
from .agent_update_status_params import (
AgentUpdateStatusParams as AgentUpdateStatusParams,
)
@@ -100,6 +101,7 @@
GPUDropletCreateResponse as GPUDropletCreateResponse,
)
from .nf_initiate_action_response import NfInitiateActionResponse as NfInitiateActionResponse
+from .retrieve_documents_response import RetrieveDocumentsResponse as RetrieveDocumentsResponse
from .agent_update_status_response import (
AgentUpdateStatusResponse as AgentUpdateStatusResponse,
)
diff --git a/src/gradient/types/agent_create_response.py b/src/gradient/types/agent_create_response.py
index edd48b7d..a9138a04 100644
--- a/src/gradient/types/agent_create_response.py
+++ b/src/gradient/types/agent_create_response.py
@@ -10,6 +10,8 @@
class AgentCreateResponse(BaseModel):
+ """Information about a newly created Agent"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agent_delete_response.py b/src/gradient/types/agent_delete_response.py
index 8c2b2e14..c16ea9fc 100644
--- a/src/gradient/types/agent_delete_response.py
+++ b/src/gradient/types/agent_delete_response.py
@@ -10,6 +10,8 @@
class AgentDeleteResponse(BaseModel):
+ """Info about a deleted agent"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agent_list_response.py b/src/gradient/types/agent_list_response.py
index c461f152..a3b5cf6c 100644
--- a/src/gradient/types/agent_list_response.py
+++ b/src/gradient/types/agent_list_response.py
@@ -24,6 +24,8 @@
class AgentChatbot(BaseModel):
+ """A Chatbot"""
+
allowed_domains: Optional[List[str]] = None
button_background_color: Optional[str] = None
@@ -41,11 +43,15 @@ class AgentChatbot(BaseModel):
class AgentChatbotIdentifier(BaseModel):
+ """Agent Chatbot Identifier"""
+
agent_chatbot_identifier: Optional[str] = None
"""Agent chatbot identifier"""
class AgentDeployment(BaseModel):
+ """Description of deployment"""
+
created_at: Optional[datetime] = None
"""Creation date / time"""
@@ -98,6 +104,8 @@ class AgentTemplateGuardrail(BaseModel):
class AgentTemplate(BaseModel):
+ """Represents an AgentTemplate entity"""
+
created_at: Optional[datetime] = None
"""The agent template's creation date"""
@@ -157,6 +165,8 @@ class AgentTemplate(BaseModel):
class Agent(BaseModel):
+ """A GenAI Agent's configuration"""
+
chatbot: Optional[AgentChatbot] = None
"""A Chatbot"""
@@ -266,6 +276,8 @@ class Agent(BaseModel):
class AgentListResponse(BaseModel):
+ """List of Agents"""
+
agents: Optional[List[Agent]] = None
"""Agents"""
diff --git a/src/gradient/types/agent_retrieve_response.py b/src/gradient/types/agent_retrieve_response.py
index 2836558b..c8b25e0b 100644
--- a/src/gradient/types/agent_retrieve_response.py
+++ b/src/gradient/types/agent_retrieve_response.py
@@ -10,6 +10,8 @@
class AgentRetrieveResponse(BaseModel):
+ """One Agent"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agent_retrieve_usage_response.py b/src/gradient/types/agent_retrieve_usage_response.py
index 1d65addd..f4622ec8 100644
--- a/src/gradient/types/agent_retrieve_usage_response.py
+++ b/src/gradient/types/agent_retrieve_usage_response.py
@@ -9,12 +9,16 @@
class LogInsightsUsageMeasurement(BaseModel):
+ """Usage Measurement Description"""
+
tokens: Optional[int] = None
usage_type: Optional[str] = None
class LogInsightsUsage(BaseModel):
+ """Resource Usage Description"""
+
measurements: Optional[List[LogInsightsUsageMeasurement]] = None
resource_uuid: Optional[str] = None
@@ -25,12 +29,16 @@ class LogInsightsUsage(BaseModel):
class UsageMeasurement(BaseModel):
+ """Usage Measurement Description"""
+
tokens: Optional[int] = None
usage_type: Optional[str] = None
class Usage(BaseModel):
+ """Resource Usage Description"""
+
measurements: Optional[List[UsageMeasurement]] = None
resource_uuid: Optional[str] = None
@@ -41,6 +49,8 @@ class Usage(BaseModel):
class AgentRetrieveUsageResponse(BaseModel):
+ """Agent usage"""
+
log_insights_usage: Optional[LogInsightsUsage] = None
"""Resource Usage Description"""
diff --git a/src/gradient/types/agent_update_response.py b/src/gradient/types/agent_update_response.py
index 1976089b..fb232225 100644
--- a/src/gradient/types/agent_update_response.py
+++ b/src/gradient/types/agent_update_response.py
@@ -10,6 +10,8 @@
class AgentUpdateResponse(BaseModel):
+ """Information about an updated agent"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agent_update_status_response.py b/src/gradient/types/agent_update_status_response.py
index 84457d85..a562915e 100644
--- a/src/gradient/types/agent_update_status_response.py
+++ b/src/gradient/types/agent_update_status_response.py
@@ -10,6 +10,8 @@
class AgentUpdateStatusResponse(BaseModel):
+ """UpdateAgentDeploymentVisbilityOutput description"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agents/api_evaluation_metric.py b/src/gradient/types/agents/api_evaluation_metric.py
index 2d3b4194..84c3ea0a 100644
--- a/src/gradient/types/agents/api_evaluation_metric.py
+++ b/src/gradient/types/agents/api_evaluation_metric.py
@@ -9,13 +9,28 @@
class APIEvaluationMetric(BaseModel):
+ category: Optional[
+ Literal[
+ "METRIC_CATEGORY_UNSPECIFIED",
+ "METRIC_CATEGORY_CORRECTNESS",
+ "METRIC_CATEGORY_USER_OUTCOMES",
+ "METRIC_CATEGORY_SAFETY_AND_SECURITY",
+ "METRIC_CATEGORY_CONTEXT_QUALITY",
+ "METRIC_CATEGORY_MODEL_FIT",
+ ]
+ ] = None
+
description: Optional[str] = None
inverted: Optional[bool] = None
"""If true, the metric is inverted, meaning that a lower value is better."""
+ is_metric_goal: Optional[bool] = None
+
metric_name: Optional[str] = None
+ metric_rank: Optional[int] = None
+
metric_type: Optional[
Literal["METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", "METRIC_TYPE_RAG_AND_TOOL"]
] = None
diff --git a/src/gradient/types/agents/api_link_knowledge_base_output.py b/src/gradient/types/agents/api_link_knowledge_base_output.py
index 2e7cec1e..d59f2677 100644
--- a/src/gradient/types/agents/api_link_knowledge_base_output.py
+++ b/src/gradient/types/agents/api_link_knowledge_base_output.py
@@ -10,6 +10,8 @@
class APILinkKnowledgeBaseOutput(BaseModel):
+ """Information about a linked knowledge base"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agents/chat/completion_create_params.py b/src/gradient/types/agents/chat/completion_create_params.py
index 0980132e..d238f8e1 100644
--- a/src/gradient/types/agents/chat/completion_create_params.py
+++ b/src/gradient/types/agents/chat/completion_create_params.py
@@ -166,6 +166,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ """Content part with type and text"""
+
text: Required[str]
"""The text content"""
@@ -179,6 +181,11 @@ class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMem
class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
+ """
+ System-provided instructions that the model should follow, regardless of
+ messages sent by the user.
+ """
+
content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart]]]
"""The contents of the system message."""
@@ -187,6 +194,8 @@ class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ """Content part with type and text"""
+
text: Required[str]
"""The text content"""
@@ -200,6 +209,11 @@ class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnion
class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
+ """
+ Developer-provided instructions that the model should follow, regardless of
+ messages sent by the user.
+ """
+
content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart]]]
"""The contents of the developer message."""
@@ -208,6 +222,8 @@ class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ """Content part with type and text"""
+
text: Required[str]
"""The text content"""
@@ -221,6 +237,11 @@ class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMembe
class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
+ """
+ Messages sent by an end user, containing prompts or additional context
+ information.
+ """
+
content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]]
"""The contents of the user message."""
@@ -229,6 +250,8 @@ class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ """Content part with type and text"""
+
text: Required[str]
"""The text content"""
@@ -242,6 +265,8 @@ class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnion
class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False):
+ """The function that the model called."""
+
arguments: Required[str]
"""
The arguments to call the function with, as generated by the model in JSON
@@ -266,6 +291,8 @@ class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=Fals
class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False):
+ """Messages sent by the model in response to user messages."""
+
role: Required[Literal["assistant"]]
"""The role of the messages author, in this case `assistant`."""
@@ -297,6 +324,8 @@ class MessageChatCompletionRequestToolMessage(TypedDict, total=False):
class StreamOptions(TypedDict, total=False):
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
include_usage: bool
"""If set, an additional chunk will be streamed before the `data: [DONE]` message.
@@ -315,6 +344,11 @@ class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False):
class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False):
+ """Specifies a tool the model should use.
+
+ Use to force the model to call a specific function.
+ """
+
function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction]
type: Required[Literal["function"]]
diff --git a/src/gradient/types/agents/chat/completion_create_response.py b/src/gradient/types/agents/chat/completion_create_response.py
index 69b3d203..88c64763 100644
--- a/src/gradient/types/agents/chat/completion_create_response.py
+++ b/src/gradient/types/agents/chat/completion_create_response.py
@@ -18,6 +18,8 @@
class ChoiceLogprobs(BaseModel):
+ """Log probability information for the choice."""
+
content: Optional[List[ChatCompletionTokenLogprob]] = None
"""A list of message content tokens with log probability information."""
@@ -26,6 +28,8 @@ class ChoiceLogprobs(BaseModel):
class ChoiceMessageToolCallFunction(BaseModel):
+ """The function that the model called."""
+
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
@@ -50,6 +54,8 @@ class ChoiceMessageToolCall(BaseModel):
class ChoiceMessage(BaseModel):
+ """A chat completion message generated by the model."""
+
content: Optional[str] = None
"""The contents of the message."""
@@ -86,6 +92,10 @@ class Choice(BaseModel):
class CompletionCreateResponse(BaseModel):
+ """
+ Represents a chat completion response returned by model, based on the provided input.
+ """
+
id: str
"""A unique identifier for the chat completion."""
diff --git a/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
index 9a4000c0..9412b46c 100644
--- a/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
+++ b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
@@ -14,6 +14,8 @@ class EvaluationDatasetCreateFileUploadPresignedURLsParams(TypedDict, total=Fals
class File(TypedDict, total=False):
+ """A single file’s metadata in the request."""
+
file_name: str
"""Local filename"""
diff --git a/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
index bee94c93..3648a9ed 100644
--- a/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
+++ b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
@@ -9,6 +9,8 @@
class Upload(BaseModel):
+ """Detailed info about each presigned URL returned to the client."""
+
expires_at: Optional[datetime] = None
"""The time the url expires at."""
@@ -23,6 +25,8 @@ class Upload(BaseModel):
class EvaluationDatasetCreateFileUploadPresignedURLsResponse(BaseModel):
+ """Response with pre-signed urls to upload files."""
+
request_id: Optional[str] = None
"""The ID generated for the request for Presigned URLs."""
diff --git a/src/gradient/types/agents/evaluation_dataset_create_response.py b/src/gradient/types/agents/evaluation_dataset_create_response.py
index f5c7fbac..4e5f8c9b 100644
--- a/src/gradient/types/agents/evaluation_dataset_create_response.py
+++ b/src/gradient/types/agents/evaluation_dataset_create_response.py
@@ -8,5 +8,7 @@
class EvaluationDatasetCreateResponse(BaseModel):
+ """Output for creating an agent evaluation dataset"""
+
evaluation_dataset_uuid: Optional[str] = None
"""Evaluation dataset uuid."""
diff --git a/src/gradient/types/agents/evaluation_metric_list_regions_response.py b/src/gradient/types/agents/evaluation_metric_list_regions_response.py
index 7246d484..dc07a7ef 100644
--- a/src/gradient/types/agents/evaluation_metric_list_regions_response.py
+++ b/src/gradient/types/agents/evaluation_metric_list_regions_response.py
@@ -8,6 +8,8 @@
class Region(BaseModel):
+ """Description for a specific Region"""
+
inference_url: Optional[str] = None
"""Url for inference server"""
@@ -25,5 +27,7 @@ class Region(BaseModel):
class EvaluationMetricListRegionsResponse(BaseModel):
+ """Region Codes"""
+
regions: Optional[List[Region]] = None
"""Region code"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py
index 24b7bbb2..34babe47 100644
--- a/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py
@@ -9,5 +9,9 @@
class KeyCreateResponse(BaseModel):
+ """
+ CreateAnthropicAPIKeyOutput is used to return the newly created Anthropic API key.
+ """
+
api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
"""Anthropic API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py
index b5d8584e..c2796b36 100644
--- a/src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py
@@ -9,5 +9,7 @@
class KeyDeleteResponse(BaseModel):
+ """DeleteAnthropicAPIKeyOutput is used to return the deleted Anthropic API key."""
+
api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
"""Anthropic API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
index 633211cc..34ab7508 100644
--- a/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
@@ -12,6 +12,8 @@
class KeyListAgentsResponse(BaseModel):
+ """List of Agents that linked to a specific Anthropic Key"""
+
agents: Optional[List["APIAgent"]] = None
links: Optional[APILinks] = None
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py
index edc9e75a..21729e57 100644
--- a/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py
@@ -11,6 +11,10 @@
class KeyListResponse(BaseModel):
+ """
+ ListAnthropicAPIKeysOutput is used to return the list of Anthropic API keys for a specific agent.
+ """
+
api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
"""Api key infos"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py
index 06fa2d18..04d20e9b 100644
--- a/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py
@@ -9,5 +9,7 @@
class KeyUpdateResponse(BaseModel):
+ """UpdateAnthropicAPIKeyOutput is used to return the updated Anthropic API key."""
+
api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
"""Anthropic API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py
index 816b89f4..6277059b 100644
--- a/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py
@@ -8,6 +8,8 @@
class DropboxCreateTokensResponse(BaseModel):
+ """The dropbox oauth2 token and refresh token"""
+
token: Optional[str] = None
"""The access token"""
diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py
index 8be21b8a..f1e782c4 100644
--- a/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py
@@ -8,5 +8,7 @@
class Oauth2GenerateURLResponse(BaseModel):
+ """The url for the oauth2 flow"""
+
url: Optional[str] = None
"""The oauth2 url"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py
index 4af7b872..f6254e1c 100644
--- a/src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py
@@ -9,5 +9,7 @@
class KeyCreateResponse(BaseModel):
+ """CreateOpenAIAPIKeyOutput is used to return the newly created OpenAI API key."""
+
api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
"""OpenAI API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py
index f1ebc73a..1ac937f4 100644
--- a/src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py
@@ -9,5 +9,7 @@
class KeyDeleteResponse(BaseModel):
+ """DeleteOpenAIAPIKeyOutput is used to return the deleted OpenAI API key."""
+
api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
"""OpenAI API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py
index 633211cc..fa2ba7cc 100644
--- a/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py
@@ -12,6 +12,8 @@
class KeyListAgentsResponse(BaseModel):
+ """List of Agents that are linked to a specific OpenAI Key"""
+
agents: Optional[List["APIAgent"]] = None
links: Optional[APILinks] = None
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py
index 00738f68..f335cfc9 100644
--- a/src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py
@@ -11,6 +11,10 @@
class KeyListResponse(BaseModel):
+ """
+ ListOpenAIAPIKeysOutput is used to return the list of OpenAI API keys for a specific agent.
+ """
+
api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
"""Api key infos"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py
index 222a8416..28b56926 100644
--- a/src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py
@@ -9,5 +9,7 @@
class KeyUpdateResponse(BaseModel):
+ """UpdateOpenAIAPIKeyOutput is used to return the updated OpenAI API key."""
+
api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
"""OpenAI API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
index c306c5b1..78cb1e73 100644
--- a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
@@ -9,6 +9,8 @@
class IndexingInfo(BaseModel):
+ """Metadata for scheduled indexing entries"""
+
created_at: Optional[datetime] = None
"""Created at timestamp"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
index febf3759..b359cb18 100644
--- a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
@@ -9,6 +9,8 @@
class IndexingInfo(BaseModel):
+ """Metadata for scheduled indexing entries"""
+
created_at: Optional[datetime] = None
"""Created at timestamp"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
index 1776c83d..4d3840a3 100644
--- a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
@@ -9,6 +9,8 @@
class IndexingInfo(BaseModel):
+ """Metadata for scheduled indexing entries"""
+
created_at: Optional[datetime] = None
"""Created at timestamp"""
diff --git a/src/gradient/types/agents/evaluation_run_list_results_response.py b/src/gradient/types/agents/evaluation_run_list_results_response.py
index df830a5b..e06bac94 100644
--- a/src/gradient/types/agents/evaluation_run_list_results_response.py
+++ b/src/gradient/types/agents/evaluation_run_list_results_response.py
@@ -12,6 +12,8 @@
class EvaluationRunListResultsResponse(BaseModel):
+ """Gets the full results of an evaluation run with all prompts."""
+
evaluation_run: Optional[APIEvaluationRun] = None
links: Optional[APILinks] = None
diff --git a/src/gradient/types/agents/function_create_response.py b/src/gradient/types/agents/function_create_response.py
index 65a4bb2b..335ebac0 100644
--- a/src/gradient/types/agents/function_create_response.py
+++ b/src/gradient/types/agents/function_create_response.py
@@ -10,6 +10,8 @@
class FunctionCreateResponse(BaseModel):
+ """Information about a newly function linked agent"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agents/function_delete_response.py b/src/gradient/types/agents/function_delete_response.py
index 26ad02e6..7490d34d 100644
--- a/src/gradient/types/agents/function_delete_response.py
+++ b/src/gradient/types/agents/function_delete_response.py
@@ -10,6 +10,8 @@
class FunctionDeleteResponse(BaseModel):
+ """Information about a newly unlinked agent"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agents/function_update_response.py b/src/gradient/types/agents/function_update_response.py
index eebde3e6..72399e92 100644
--- a/src/gradient/types/agents/function_update_response.py
+++ b/src/gradient/types/agents/function_update_response.py
@@ -10,6 +10,8 @@
class FunctionUpdateResponse(BaseModel):
+ """The updated agent"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agents/knowledge_base_detach_response.py b/src/gradient/types/agents/knowledge_base_detach_response.py
index 0dc90aaf..c94b99a1 100644
--- a/src/gradient/types/agents/knowledge_base_detach_response.py
+++ b/src/gradient/types/agents/knowledge_base_detach_response.py
@@ -10,6 +10,8 @@
class KnowledgeBaseDetachResponse(BaseModel):
+ """Information about an unlinked knowledge base"""
+
agent: Optional["APIAgent"] = None
"""An Agent"""
diff --git a/src/gradient/types/agents/route_add_response.py b/src/gradient/types/agents/route_add_response.py
index b9cc2b7d..b1755d54 100644
--- a/src/gradient/types/agents/route_add_response.py
+++ b/src/gradient/types/agents/route_add_response.py
@@ -8,6 +8,8 @@
class RouteAddResponse(BaseModel):
+ """Information about a newly linked agent"""
+
child_agent_uuid: Optional[str] = None
"""Routed agent id"""
diff --git a/src/gradient/types/agents/route_delete_response.py b/src/gradient/types/agents/route_delete_response.py
index b49c8b7c..6dcc03b6 100644
--- a/src/gradient/types/agents/route_delete_response.py
+++ b/src/gradient/types/agents/route_delete_response.py
@@ -8,6 +8,8 @@
class RouteDeleteResponse(BaseModel):
+ """Information about a removed linkage"""
+
child_agent_uuid: Optional[str] = None
"""Routed agent id"""
diff --git a/src/gradient/types/agents/route_update_response.py b/src/gradient/types/agents/route_update_response.py
index b79fc9fe..dfcec469 100644
--- a/src/gradient/types/agents/route_update_response.py
+++ b/src/gradient/types/agents/route_update_response.py
@@ -8,6 +8,8 @@
class RouteUpdateResponse(BaseModel):
+ """Information about an updated linkage"""
+
child_agent_uuid: Optional[str] = None
"""Routed agent id"""
diff --git a/src/gradient/types/agents/route_view_response.py b/src/gradient/types/agents/route_view_response.py
index f0ee2d71..ddbf6f33 100644
--- a/src/gradient/types/agents/route_view_response.py
+++ b/src/gradient/types/agents/route_view_response.py
@@ -10,6 +10,8 @@
class RouteViewResponse(BaseModel):
+ """Child list for an agent"""
+
children: Optional[List["APIAgent"]] = None
"""Child agents"""
diff --git a/src/gradient/types/agents/version_list_response.py b/src/gradient/types/agents/version_list_response.py
index c35a5ba4..75c45a95 100644
--- a/src/gradient/types/agents/version_list_response.py
+++ b/src/gradient/types/agents/version_list_response.py
@@ -38,6 +38,8 @@ class AgentVersionAttachedChildAgent(BaseModel):
class AgentVersionAttachedFunction(BaseModel):
+ """Function represents a function configuration for an agent"""
+
description: Optional[str] = None
"""Description of the function"""
@@ -55,6 +57,8 @@ class AgentVersionAttachedFunction(BaseModel):
class AgentVersionAttachedGuardrail(BaseModel):
+ """Agent Guardrail version"""
+
is_deleted: Optional[bool] = None
"""Whether the guardrail is deleted"""
@@ -80,6 +84,8 @@ class AgentVersionAttachedKnowledgebase(BaseModel):
class AgentVersion(BaseModel):
+ """Represents an AgentVersion entity"""
+
id: Optional[str] = None
"""Unique identifier"""
@@ -157,6 +163,8 @@ class AgentVersion(BaseModel):
class VersionListResponse(BaseModel):
+ """List of agent versions"""
+
agent_versions: Optional[List[AgentVersion]] = None
"""Agents"""
diff --git a/src/gradient/types/agents/version_update_response.py b/src/gradient/types/agents/version_update_response.py
index 464ef12f..ee1188d1 100644
--- a/src/gradient/types/agents/version_update_response.py
+++ b/src/gradient/types/agents/version_update_response.py
@@ -8,6 +8,8 @@
class AuditHeader(BaseModel):
+ """An alternative way to provide auth information. for internal use only."""
+
actor_id: Optional[str] = None
actor_ip: Optional[str] = None
diff --git a/src/gradient/types/api_agent.py b/src/gradient/types/api_agent.py
index f52e44c8..e3fb21f2 100644
--- a/src/gradient/types/api_agent.py
+++ b/src/gradient/types/api_agent.py
@@ -33,11 +33,15 @@
class APIKey(BaseModel):
+ """Agent API Key"""
+
api_key: Optional[str] = None
"""Api key"""
class Chatbot(BaseModel):
+ """A Chatbot"""
+
allowed_domains: Optional[List[str]] = None
button_background_color: Optional[str] = None
@@ -55,11 +59,15 @@ class Chatbot(BaseModel):
class ChatbotIdentifier(BaseModel):
+ """Agent Chatbot Identifier"""
+
agent_chatbot_identifier: Optional[str] = None
"""Agent chatbot identifier"""
class Deployment(BaseModel):
+ """Description of deployment"""
+
created_at: Optional[datetime] = None
"""Creation date / time"""
@@ -104,6 +112,8 @@ class Deployment(BaseModel):
class Function(BaseModel):
+ """Function represents a function configuration for an agent"""
+
api_key: Optional[str] = None
"""Api key"""
@@ -138,6 +148,8 @@ class Function(BaseModel):
class Guardrail(BaseModel):
+ """An Agent Guardrail"""
+
agent_uuid: Optional[str] = None
created_at: Optional[datetime] = None
@@ -228,6 +240,8 @@ class TemplateGuardrail(BaseModel):
class Template(BaseModel):
+ """Represents an AgentTemplate entity"""
+
created_at: Optional[datetime] = None
"""The agent template's creation date"""
@@ -287,6 +301,8 @@ class Template(BaseModel):
class APIAgent(BaseModel):
+ """An Agent"""
+
anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None
"""Anthropic API Key Info"""
diff --git a/src/gradient/types/api_agent_api_key_info.py b/src/gradient/types/api_agent_api_key_info.py
index 7222153c..06bd0fda 100644
--- a/src/gradient/types/api_agent_api_key_info.py
+++ b/src/gradient/types/api_agent_api_key_info.py
@@ -9,6 +9,8 @@
class APIAgentAPIKeyInfo(BaseModel):
+ """Agent API Key Info"""
+
created_at: Optional[datetime] = None
"""Creation date"""
diff --git a/src/gradient/types/api_agent_model.py b/src/gradient/types/api_agent_model.py
index f111bfb7..e42bb5d5 100644
--- a/src/gradient/types/api_agent_model.py
+++ b/src/gradient/types/api_agent_model.py
@@ -12,6 +12,8 @@
class APIAgentModel(BaseModel):
+ """Description of a Model"""
+
agreement: Optional[APIAgreement] = None
"""Agreement Description"""
diff --git a/src/gradient/types/api_agreement.py b/src/gradient/types/api_agreement.py
index c4359f1f..8eca3c3c 100644
--- a/src/gradient/types/api_agreement.py
+++ b/src/gradient/types/api_agreement.py
@@ -8,6 +8,8 @@
class APIAgreement(BaseModel):
+ """Agreement Description"""
+
description: Optional[str] = None
name: Optional[str] = None
diff --git a/src/gradient/types/api_anthropic_api_key_info.py b/src/gradient/types/api_anthropic_api_key_info.py
index 6440c5ef..bf13fd60 100644
--- a/src/gradient/types/api_anthropic_api_key_info.py
+++ b/src/gradient/types/api_anthropic_api_key_info.py
@@ -9,6 +9,8 @@
class APIAnthropicAPIKeyInfo(BaseModel):
+ """Anthropic API Key Info"""
+
created_at: Optional[datetime] = None
"""Key creation date"""
diff --git a/src/gradient/types/api_knowledge_base.py b/src/gradient/types/api_knowledge_base.py
index 4e4a6567..e64f9336 100644
--- a/src/gradient/types/api_knowledge_base.py
+++ b/src/gradient/types/api_knowledge_base.py
@@ -10,6 +10,8 @@
class APIKnowledgeBase(BaseModel):
+ """Knowledgebase Description"""
+
added_to_agent_at: Optional[datetime] = None
"""Time when the knowledge base was added to the agent"""
diff --git a/src/gradient/types/api_model.py b/src/gradient/types/api_model.py
index e7f99bc1..83b1b66a 100644
--- a/src/gradient/types/api_model.py
+++ b/src/gradient/types/api_model.py
@@ -11,6 +11,8 @@
class APIModel(BaseModel):
+ """A machine learning model stored on the GenAI platform"""
+
id: Optional[str] = None
"""Human-readable model identifier"""
diff --git a/src/gradient/types/api_model_version.py b/src/gradient/types/api_model_version.py
index f19a78c6..3989e256 100644
--- a/src/gradient/types/api_model_version.py
+++ b/src/gradient/types/api_model_version.py
@@ -8,6 +8,8 @@
class APIModelVersion(BaseModel):
+ """Version Information about a Model"""
+
major: Optional[int] = None
"""Major version number"""
diff --git a/src/gradient/types/api_openai_api_key_info.py b/src/gradient/types/api_openai_api_key_info.py
index bcee992b..69e9b138 100644
--- a/src/gradient/types/api_openai_api_key_info.py
+++ b/src/gradient/types/api_openai_api_key_info.py
@@ -10,6 +10,8 @@
class APIOpenAIAPIKeyInfo(BaseModel):
+ """OpenAI API Key Info"""
+
created_at: Optional[datetime] = None
"""Key creation date"""
diff --git a/src/gradient/types/chat/completion_create_params.py b/src/gradient/types/chat/completion_create_params.py
index 7874d893..e889c5e8 100644
--- a/src/gradient/types/chat/completion_create_params.py
+++ b/src/gradient/types/chat/completion_create_params.py
@@ -166,6 +166,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ """Content part with type and text"""
+
text: Required[str]
"""The text content"""
@@ -179,6 +181,11 @@ class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMem
class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
+ """
+ System-provided instructions that the model should follow, regardless of
+ messages sent by the user.
+ """
+
content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart]]]
"""The contents of the system message."""
@@ -187,6 +194,8 @@ class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ """Content part with type and text"""
+
text: Required[str]
"""The text content"""
@@ -200,6 +209,11 @@ class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnion
class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
+ """
+ Developer-provided instructions that the model should follow, regardless of
+ messages sent by the user.
+ """
+
content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart]]]
"""The contents of the developer message."""
@@ -208,6 +222,8 @@ class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ """Content part with type and text"""
+
text: Required[str]
"""The text content"""
@@ -221,6 +237,11 @@ class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMembe
class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
+ """
+ Messages sent by an end user, containing prompts or additional context
+ information.
+ """
+
content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]]
"""The contents of the user message."""
@@ -229,6 +250,8 @@ class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ """Content part with type and text"""
+
text: Required[str]
"""The text content"""
@@ -242,6 +265,8 @@ class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnion
class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False):
+ """The function that the model called."""
+
arguments: Required[str]
"""
The arguments to call the function with, as generated by the model in JSON
@@ -266,6 +291,8 @@ class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=Fals
class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False):
+ """Messages sent by the model in response to user messages."""
+
role: Required[Literal["assistant"]]
"""The role of the messages author, in this case `assistant`."""
@@ -297,6 +324,8 @@ class MessageChatCompletionRequestToolMessage(TypedDict, total=False):
class StreamOptions(TypedDict, total=False):
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
include_usage: bool
"""If set, an additional chunk will be streamed before the `data: [DONE]` message.
@@ -315,6 +344,11 @@ class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False):
class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False):
+ """Specifies a tool the model should use.
+
+ Use to force the model to call a specific function.
+ """
+
function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction]
type: Required[Literal["function"]]
diff --git a/src/gradient/types/chat/completion_create_response.py b/src/gradient/types/chat/completion_create_response.py
index 151f6556..13efee40 100644
--- a/src/gradient/types/chat/completion_create_response.py
+++ b/src/gradient/types/chat/completion_create_response.py
@@ -18,6 +18,8 @@
class ChoiceLogprobs(BaseModel):
+ """Log probability information for the choice."""
+
content: Optional[List[ChatCompletionTokenLogprob]] = None
"""A list of message content tokens with log probability information."""
@@ -26,6 +28,8 @@ class ChoiceLogprobs(BaseModel):
class ChoiceMessageToolCallFunction(BaseModel):
+ """The function that the model called."""
+
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
@@ -50,6 +54,8 @@ class ChoiceMessageToolCall(BaseModel):
class ChoiceMessage(BaseModel):
+ """A chat completion message generated by the model."""
+
content: Optional[str] = None
"""The contents of the message."""
@@ -86,6 +92,10 @@ class Choice(BaseModel):
class CompletionCreateResponse(BaseModel):
+ """
+ Represents a chat completion response returned by model, based on the provided input.
+ """
+
id: str
"""A unique identifier for the chat completion."""
diff --git a/src/gradient/types/gpu_droplets/associated_resource.py b/src/gradient/types/gpu_droplets/associated_resource.py
index f72c3d32..500c69e2 100644
--- a/src/gradient/types/gpu_droplets/associated_resource.py
+++ b/src/gradient/types/gpu_droplets/associated_resource.py
@@ -8,6 +8,8 @@
class AssociatedResource(BaseModel):
+ """An object containing information about a resource associated with a Droplet."""
+
id: Optional[str] = None
"""The unique identifier for the resource associated with the Droplet."""
diff --git a/src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
index f2f2ff67..8dd32c14 100644
--- a/src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
+++ b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
@@ -10,6 +10,10 @@
class Resources(BaseModel):
+ """
+ An object containing additional information about a resource related to a Droplet requested to be destroyed.
+ """
+
floating_ips: Optional[List[DestroyedAssociatedResource]] = None
reserved_ips: Optional[List[DestroyedAssociatedResource]] = None
@@ -22,6 +26,8 @@ class Resources(BaseModel):
class DestroyWithAssociatedResourceCheckStatusResponse(BaseModel):
+ """An object containing information about resources scheduled for deletion."""
+
completed_at: Optional[datetime] = None
"""
A time value given in ISO8601 combined date and time format indicating when the
diff --git a/src/gradient/types/gpu_droplets/destroyed_associated_resource.py b/src/gradient/types/gpu_droplets/destroyed_associated_resource.py
index 358c14e9..fd3784e4 100644
--- a/src/gradient/types/gpu_droplets/destroyed_associated_resource.py
+++ b/src/gradient/types/gpu_droplets/destroyed_associated_resource.py
@@ -9,6 +9,8 @@
class DestroyedAssociatedResource(BaseModel):
+ """An object containing information about a resource scheduled for deletion."""
+
id: Optional[str] = None
"""The unique identifier for the resource scheduled for deletion."""
diff --git a/src/gradient/types/gpu_droplets/domains.py b/src/gradient/types/gpu_droplets/domains.py
index 6a9400f9..14d4a0bb 100644
--- a/src/gradient/types/gpu_droplets/domains.py
+++ b/src/gradient/types/gpu_droplets/domains.py
@@ -8,6 +8,8 @@
class Domains(BaseModel):
+ """An object specifying domain configurations for a Global load balancer."""
+
certificate_id: Optional[str] = None
"""The ID of the TLS certificate used for SSL termination."""
diff --git a/src/gradient/types/gpu_droplets/domains_param.py b/src/gradient/types/gpu_droplets/domains_param.py
index d2d21faf..44481775 100644
--- a/src/gradient/types/gpu_droplets/domains_param.py
+++ b/src/gradient/types/gpu_droplets/domains_param.py
@@ -8,6 +8,8 @@
class DomainsParam(TypedDict, total=False):
+ """An object specifying domain configurations for a Global load balancer."""
+
certificate_id: str
"""The ID of the TLS certificate used for SSL termination."""
diff --git a/src/gradient/types/gpu_droplets/floating_ip.py b/src/gradient/types/gpu_droplets/floating_ip.py
index 81c58753..f592d510 100644
--- a/src/gradient/types/gpu_droplets/floating_ip.py
+++ b/src/gradient/types/gpu_droplets/floating_ip.py
@@ -12,6 +12,11 @@
class Region(region.Region):
+ """The region that the floating IP is reserved to.
+
+ When you query a floating IP, the entire region object will be returned.
+ """
+
pass
diff --git a/src/gradient/types/gpu_droplets/forwarding_rule.py b/src/gradient/types/gpu_droplets/forwarding_rule.py
index 40a310ab..a9345e05 100644
--- a/src/gradient/types/gpu_droplets/forwarding_rule.py
+++ b/src/gradient/types/gpu_droplets/forwarding_rule.py
@@ -9,6 +9,8 @@
class ForwardingRule(BaseModel):
+ """An object specifying a forwarding rule for a load balancer."""
+
entry_port: int
"""
An integer representing the port on which the load balancer instance will
diff --git a/src/gradient/types/gpu_droplets/forwarding_rule_param.py b/src/gradient/types/gpu_droplets/forwarding_rule_param.py
index 70285bf6..f81dfd6b 100644
--- a/src/gradient/types/gpu_droplets/forwarding_rule_param.py
+++ b/src/gradient/types/gpu_droplets/forwarding_rule_param.py
@@ -8,6 +8,8 @@
class ForwardingRuleParam(TypedDict, total=False):
+ """An object specifying a forwarding rule for a load balancer."""
+
entry_port: Required[int]
"""
An integer representing the port on which the load balancer instance will
diff --git a/src/gradient/types/gpu_droplets/glb_settings.py b/src/gradient/types/gpu_droplets/glb_settings.py
index 9aa790d8..0af332eb 100644
--- a/src/gradient/types/gpu_droplets/glb_settings.py
+++ b/src/gradient/types/gpu_droplets/glb_settings.py
@@ -9,11 +9,15 @@
class Cdn(BaseModel):
+ """An object specifying CDN configurations for a Global load balancer."""
+
is_enabled: Optional[bool] = None
"""A boolean flag to enable CDN caching."""
class GlbSettings(BaseModel):
+ """An object specifying forwarding configurations for a Global load balancer."""
+
cdn: Optional[Cdn] = None
"""An object specifying CDN configurations for a Global load balancer."""
diff --git a/src/gradient/types/gpu_droplets/glb_settings_param.py b/src/gradient/types/gpu_droplets/glb_settings_param.py
index f1b25c8b..a790d0ee 100644
--- a/src/gradient/types/gpu_droplets/glb_settings_param.py
+++ b/src/gradient/types/gpu_droplets/glb_settings_param.py
@@ -9,11 +9,15 @@
class Cdn(TypedDict, total=False):
+ """An object specifying CDN configurations for a Global load balancer."""
+
is_enabled: bool
"""A boolean flag to enable CDN caching."""
class GlbSettingsParam(TypedDict, total=False):
+ """An object specifying forwarding configurations for a Global load balancer."""
+
cdn: Cdn
"""An object specifying CDN configurations for a Global load balancer."""
diff --git a/src/gradient/types/gpu_droplets/health_check.py b/src/gradient/types/gpu_droplets/health_check.py
index db44d84e..e20cbc65 100644
--- a/src/gradient/types/gpu_droplets/health_check.py
+++ b/src/gradient/types/gpu_droplets/health_check.py
@@ -9,6 +9,8 @@
class HealthCheck(BaseModel):
+ """An object specifying health check settings for the load balancer."""
+
check_interval_seconds: Optional[int] = None
"""The number of seconds between between two consecutive health checks."""
diff --git a/src/gradient/types/gpu_droplets/health_check_param.py b/src/gradient/types/gpu_droplets/health_check_param.py
index e840f818..47de9e45 100644
--- a/src/gradient/types/gpu_droplets/health_check_param.py
+++ b/src/gradient/types/gpu_droplets/health_check_param.py
@@ -8,6 +8,8 @@
class HealthCheckParam(TypedDict, total=False):
+ """An object specifying health check settings for the load balancer."""
+
check_interval_seconds: int
"""The number of seconds between between two consecutive health checks."""
diff --git a/src/gradient/types/gpu_droplets/lb_firewall.py b/src/gradient/types/gpu_droplets/lb_firewall.py
index aea1887c..d233c642 100644
--- a/src/gradient/types/gpu_droplets/lb_firewall.py
+++ b/src/gradient/types/gpu_droplets/lb_firewall.py
@@ -8,6 +8,10 @@
class LbFirewall(BaseModel):
+ """
+ An object specifying allow and deny rules to control traffic to the load balancer.
+ """
+
allow: Optional[List[str]] = None
"""
the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or
diff --git a/src/gradient/types/gpu_droplets/lb_firewall_param.py b/src/gradient/types/gpu_droplets/lb_firewall_param.py
index 7d54a048..b15cb32c 100644
--- a/src/gradient/types/gpu_droplets/lb_firewall_param.py
+++ b/src/gradient/types/gpu_droplets/lb_firewall_param.py
@@ -10,6 +10,10 @@
class LbFirewallParam(TypedDict, total=False):
+ """
+ An object specifying allow and deny rules to control traffic to the load balancer.
+ """
+
allow: SequenceNotStr[str]
"""
the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or
diff --git a/src/gradient/types/gpu_droplets/sticky_sessions.py b/src/gradient/types/gpu_droplets/sticky_sessions.py
index 78debc07..1723241a 100644
--- a/src/gradient/types/gpu_droplets/sticky_sessions.py
+++ b/src/gradient/types/gpu_droplets/sticky_sessions.py
@@ -9,6 +9,8 @@
class StickySessions(BaseModel):
+ """An object specifying sticky sessions settings for the load balancer."""
+
cookie_name: Optional[str] = None
"""The name of the cookie sent to the client.
diff --git a/src/gradient/types/gpu_droplets/sticky_sessions_param.py b/src/gradient/types/gpu_droplets/sticky_sessions_param.py
index acea4a4a..425873dc 100644
--- a/src/gradient/types/gpu_droplets/sticky_sessions_param.py
+++ b/src/gradient/types/gpu_droplets/sticky_sessions_param.py
@@ -8,6 +8,8 @@
class StickySessionsParam(TypedDict, total=False):
+ """An object specifying sticky sessions settings for the load balancer."""
+
cookie_name: str
"""The name of the cookie sent to the client.
diff --git a/src/gradient/types/image_generate_response.py b/src/gradient/types/image_generate_response.py
index 5f97697c..324e6038 100644
--- a/src/gradient/types/image_generate_response.py
+++ b/src/gradient/types/image_generate_response.py
@@ -8,6 +8,8 @@
class Data(BaseModel):
+ """Represents the content of a generated image from GPT-IMAGE-1"""
+
b64_json: str
"""The base64-encoded JSON of the generated image.
@@ -22,11 +24,15 @@ class Data(BaseModel):
class UsageInputTokensDetails(BaseModel):
+ """Detailed breakdown of input tokens"""
+
text_tokens: Optional[int] = None
"""Number of text tokens in the input"""
class Usage(BaseModel):
+ """Usage statistics for the image generation request"""
+
input_tokens: int
"""Number of tokens in the input prompt"""
@@ -41,6 +47,8 @@ class Usage(BaseModel):
class ImageGenerateResponse(BaseModel):
+ """The response from the image generation endpoint"""
+
created: int
"""The Unix timestamp (in seconds) of when the images were created"""
diff --git a/src/gradient/types/inference/api_model_api_key_info.py b/src/gradient/types/inference/api_model_api_key_info.py
index 3da1c70a..28f96839 100644
--- a/src/gradient/types/inference/api_model_api_key_info.py
+++ b/src/gradient/types/inference/api_model_api_key_info.py
@@ -9,6 +9,8 @@
class APIModelAPIKeyInfo(BaseModel):
+ """Model API Key Info"""
+
created_at: Optional[datetime] = None
"""Creation date"""
diff --git a/src/gradient/types/knowledge_base_create_params.py b/src/gradient/types/knowledge_base_create_params.py
index 4dc42098..24cfd98b 100644
--- a/src/gradient/types/knowledge_base_create_params.py
+++ b/src/gradient/types/knowledge_base_create_params.py
@@ -53,6 +53,8 @@ class KnowledgeBaseCreateParams(TypedDict, total=False):
class DatasourceDropboxDataSource(TypedDict, total=False):
+ """Dropbox Data Source"""
+
folder: str
refresh_token: str
@@ -64,6 +66,8 @@ class DatasourceDropboxDataSource(TypedDict, total=False):
class DatasourceGoogleDriveDataSource(TypedDict, total=False):
+ """Google Drive Data Source"""
+
folder_id: str
refresh_token: str
diff --git a/src/gradient/types/knowledge_base_create_response.py b/src/gradient/types/knowledge_base_create_response.py
index 6d846fa5..72f0b58c 100644
--- a/src/gradient/types/knowledge_base_create_response.py
+++ b/src/gradient/types/knowledge_base_create_response.py
@@ -9,5 +9,7 @@
class KnowledgeBaseCreateResponse(BaseModel):
+ """Information about a newly created knowledge base"""
+
knowledge_base: Optional[APIKnowledgeBase] = None
"""Knowledgebase Description"""
diff --git a/src/gradient/types/knowledge_base_delete_response.py b/src/gradient/types/knowledge_base_delete_response.py
index b0605a20..cdf154ba 100644
--- a/src/gradient/types/knowledge_base_delete_response.py
+++ b/src/gradient/types/knowledge_base_delete_response.py
@@ -8,5 +8,7 @@
class KnowledgeBaseDeleteResponse(BaseModel):
+ """Information about a deleted knowledge base"""
+
uuid: Optional[str] = None
"""The id of the deleted knowledge base"""
diff --git a/src/gradient/types/knowledge_base_list_indexing_jobs_response.py b/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
index d88f83fc..f5376c61 100644
--- a/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
+++ b/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
@@ -11,6 +11,8 @@
class KnowledgeBaseListIndexingJobsResponse(BaseModel):
+ """Indexing jobs"""
+
jobs: Optional[List[APIIndexingJob]] = None
"""The indexing jobs"""
diff --git a/src/gradient/types/knowledge_base_list_response.py b/src/gradient/types/knowledge_base_list_response.py
index 08227316..3231f643 100644
--- a/src/gradient/types/knowledge_base_list_response.py
+++ b/src/gradient/types/knowledge_base_list_response.py
@@ -11,6 +11,8 @@
class KnowledgeBaseListResponse(BaseModel):
+ """List of knowledge bases"""
+
knowledge_bases: Optional[List[APIKnowledgeBase]] = None
"""The knowledge bases"""
diff --git a/src/gradient/types/knowledge_base_retrieve_response.py b/src/gradient/types/knowledge_base_retrieve_response.py
index 55994f70..712f858c 100644
--- a/src/gradient/types/knowledge_base_retrieve_response.py
+++ b/src/gradient/types/knowledge_base_retrieve_response.py
@@ -10,6 +10,8 @@
class KnowledgeBaseRetrieveResponse(BaseModel):
+ """The knowledge base"""
+
database_status: Optional[
Literal[
"CREATING",
diff --git a/src/gradient/types/knowledge_base_update_response.py b/src/gradient/types/knowledge_base_update_response.py
index 0840622c..0e4ff539 100644
--- a/src/gradient/types/knowledge_base_update_response.py
+++ b/src/gradient/types/knowledge_base_update_response.py
@@ -9,5 +9,7 @@
class KnowledgeBaseUpdateResponse(BaseModel):
+ """Information about an updated knowledge base"""
+
knowledge_base: Optional[APIKnowledgeBase] = None
"""Knowledgebase Description"""
diff --git a/src/gradient/types/knowledge_bases/api_file_upload_data_source.py b/src/gradient/types/knowledge_bases/api_file_upload_data_source.py
index a1c23e09..6aaeb0e7 100644
--- a/src/gradient/types/knowledge_bases/api_file_upload_data_source.py
+++ b/src/gradient/types/knowledge_bases/api_file_upload_data_source.py
@@ -8,6 +8,8 @@
class APIFileUploadDataSource(BaseModel):
+ """File to upload as data source for knowledge base."""
+
original_file_name: Optional[str] = None
"""The original file name"""
diff --git a/src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py
index 562f8a34..3cdd34ee 100644
--- a/src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py
+++ b/src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py
@@ -8,6 +8,8 @@
class APIFileUploadDataSourceParam(TypedDict, total=False):
+ """File to upload as data source for knowledge base."""
+
original_file_name: str
"""The original file name"""
diff --git a/src/gradient/types/knowledge_bases/api_indexing_job.py b/src/gradient/types/knowledge_bases/api_indexing_job.py
index 93124cf8..d43ddd6e 100644
--- a/src/gradient/types/knowledge_bases/api_indexing_job.py
+++ b/src/gradient/types/knowledge_bases/api_indexing_job.py
@@ -11,6 +11,8 @@
class APIIndexingJob(BaseModel):
+ """IndexingJob description"""
+
completed_datasources: Optional[int] = None
"""Number of datasources indexed completed"""
@@ -53,6 +55,7 @@ class APIIndexingJob(BaseModel):
"INDEX_JOB_STATUS_FAILED",
"INDEX_JOB_STATUS_NO_CHANGES",
"INDEX_JOB_STATUS_PENDING",
+ "INDEX_JOB_STATUS_CANCELLED",
]
] = None
@@ -62,18 +65,6 @@ class APIIndexingJob(BaseModel):
total_datasources: Optional[int] = None
"""Number of datasources being indexed"""
- total_items_failed: Optional[str] = None
- """Total Items Failed"""
-
- total_items_indexed: Optional[str] = None
- """Total Items Indexed"""
-
- total_items_removed: Optional[str] = None
- """Total Items Removed"""
-
- total_items_skipped: Optional[str] = None
- """Total Items Skipped"""
-
total_tokens: Optional[str] = None
"""Total Tokens Consumed By the Indexing Job"""
diff --git a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
index 223797c7..b73e325e 100644
--- a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
+++ b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
@@ -4,7 +4,6 @@
from datetime import datetime
from ..._models import BaseModel
-from .api_indexing_job import APIIndexingJob
from .api_spaces_data_source import APISpacesDataSource
from .api_indexed_data_source import APIIndexedDataSource
from .api_file_upload_data_source import APIFileUploadDataSource
@@ -14,6 +13,8 @@
class AwsDataSource(BaseModel):
+ """AWS S3 Data Source for Display"""
+
bucket_name: Optional[str] = None
"""Spaces bucket name"""
@@ -24,10 +25,14 @@ class AwsDataSource(BaseModel):
class DropboxDataSource(BaseModel):
+ """Dropbox Data Source for Display"""
+
folder: Optional[str] = None
class GoogleDriveDataSource(BaseModel):
+ """Google Drive Data Source for Display"""
+
folder_id: Optional[str] = None
folder_name: Optional[str] = None
@@ -35,6 +40,8 @@ class GoogleDriveDataSource(BaseModel):
class APIKnowledgeBaseDataSource(BaseModel):
+ """Data Source configuration for Knowledge Bases"""
+
aws_data_source: Optional[AwsDataSource] = None
"""AWS S3 Data Source for Display"""
@@ -58,9 +65,6 @@ class APIKnowledgeBaseDataSource(BaseModel):
last_datasource_indexing_job: Optional[APIIndexedDataSource] = None
- last_indexing_job: Optional[APIIndexingJob] = None
- """IndexingJob description"""
-
region: Optional[str] = None
"""Region code - Deprecated, moved to data_source_details"""
diff --git a/src/gradient/types/knowledge_bases/api_spaces_data_source.py b/src/gradient/types/knowledge_bases/api_spaces_data_source.py
index 02aa479a..2ac76d69 100644
--- a/src/gradient/types/knowledge_bases/api_spaces_data_source.py
+++ b/src/gradient/types/knowledge_bases/api_spaces_data_source.py
@@ -8,6 +8,8 @@
class APISpacesDataSource(BaseModel):
+ """Spaces Bucket Data Source"""
+
bucket_name: Optional[str] = None
"""Spaces bucket name"""
diff --git a/src/gradient/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradient/types/knowledge_bases/api_spaces_data_source_param.py
index 5eaeb0ad..9c3daf03 100644
--- a/src/gradient/types/knowledge_bases/api_spaces_data_source_param.py
+++ b/src/gradient/types/knowledge_bases/api_spaces_data_source_param.py
@@ -8,6 +8,8 @@
class APISpacesDataSourceParam(TypedDict, total=False):
+ """Spaces Bucket Data Source"""
+
bucket_name: str
"""Spaces bucket name"""
diff --git a/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
index 63c9111a..ba1ee81f 100644
--- a/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
+++ b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
@@ -9,10 +9,12 @@
class APIWebCrawlerDataSource(BaseModel):
+ """WebCrawlerDataSource"""
+
base_url: Optional[str] = None
"""The base url to crawl."""
- crawling_option: Optional[Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"]] = None
+ crawling_option: Optional[Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS", "SITEMAP"]] = None
"""Options for specifying how URLs found on pages should be handled.
- UNKNOWN: Default unknown value
@@ -20,6 +22,7 @@ class APIWebCrawlerDataSource(BaseModel):
- PATH: Crawl the base URL and linked pages within the URL path.
- DOMAIN: Crawl the base URL and linked pages within the same domain.
- SUBDOMAINS: Crawl the base URL and linked pages for any subdomain.
+ - SITEMAP: Crawl URLs discovered in the sitemap.
"""
embed_media: Optional[bool] = None
diff --git a/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
index 17988e73..ff4f3307 100644
--- a/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
+++ b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
@@ -10,10 +10,12 @@
class APIWebCrawlerDataSourceParam(TypedDict, total=False):
+ """WebCrawlerDataSource"""
+
base_url: str
"""The base url to crawl."""
- crawling_option: Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"]
+ crawling_option: Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS", "SITEMAP"]
"""Options for specifying how URLs found on pages should be handled.
- UNKNOWN: Default unknown value
@@ -21,6 +23,7 @@ class APIWebCrawlerDataSourceParam(TypedDict, total=False):
- PATH: Crawl the base URL and linked pages within the URL path.
- DOMAIN: Crawl the base URL and linked pages within the same domain.
- SUBDOMAINS: Crawl the base URL and linked pages for any subdomain.
+ - SITEMAP: Crawl URLs discovered in the sitemap.
"""
embed_media: bool
diff --git a/src/gradient/types/knowledge_bases/aws_data_source_param.py b/src/gradient/types/knowledge_bases/aws_data_source_param.py
index 912e3e29..fa99a8c1 100644
--- a/src/gradient/types/knowledge_bases/aws_data_source_param.py
+++ b/src/gradient/types/knowledge_bases/aws_data_source_param.py
@@ -8,6 +8,8 @@
class AwsDataSourceParam(TypedDict, total=False):
+ """AWS S3 Data Source"""
+
bucket_name: str
"""Spaces bucket name"""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
index 253cbce7..1d27f0ca 100644
--- a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
@@ -14,6 +14,8 @@ class DataSourceCreatePresignedURLsParams(TypedDict, total=False):
class File(TypedDict, total=False):
+ """A single file’s metadata in the request."""
+
file_name: str
"""Local filename"""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
index c3d172d7..daca9865 100644
--- a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
@@ -9,6 +9,8 @@
class Upload(BaseModel):
+ """Detailed info about each presigned URL returned to the client."""
+
expires_at: Optional[datetime] = None
"""The time the url expires at."""
@@ -23,6 +25,8 @@ class Upload(BaseModel):
class DataSourceCreatePresignedURLsResponse(BaseModel):
+ """Response with pre-signed urls to upload files."""
+
request_id: Optional[str] = None
"""The ID generated for the request for Presigned URLs."""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_response.py b/src/gradient/types/knowledge_bases/data_source_create_response.py
index 76ec88e2..da49d870 100644
--- a/src/gradient/types/knowledge_bases/data_source_create_response.py
+++ b/src/gradient/types/knowledge_bases/data_source_create_response.py
@@ -9,5 +9,7 @@
class DataSourceCreateResponse(BaseModel):
+ """Information about a newly created knowldege base data source"""
+
knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None
"""Data Source configuration for Knowledge Bases"""
diff --git a/src/gradient/types/knowledge_bases/data_source_delete_response.py b/src/gradient/types/knowledge_bases/data_source_delete_response.py
index eaad72ff..fc3e59da 100644
--- a/src/gradient/types/knowledge_bases/data_source_delete_response.py
+++ b/src/gradient/types/knowledge_bases/data_source_delete_response.py
@@ -8,6 +8,8 @@
class DataSourceDeleteResponse(BaseModel):
+ """Information about a newly deleted knowledge base data source"""
+
data_source_uuid: Optional[str] = None
"""Data source id"""
diff --git a/src/gradient/types/knowledge_bases/data_source_list_response.py b/src/gradient/types/knowledge_bases/data_source_list_response.py
index f05a49bc..5de5d372 100644
--- a/src/gradient/types/knowledge_bases/data_source_list_response.py
+++ b/src/gradient/types/knowledge_bases/data_source_list_response.py
@@ -11,6 +11,8 @@
class DataSourceListResponse(BaseModel):
+ """A list of knowledge base data sources"""
+
knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None
"""The data sources"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_create_response.py b/src/gradient/types/knowledge_bases/indexing_job_create_response.py
index 685f40ef..df7e6911 100644
--- a/src/gradient/types/knowledge_bases/indexing_job_create_response.py
+++ b/src/gradient/types/knowledge_bases/indexing_job_create_response.py
@@ -9,5 +9,7 @@
class IndexingJobCreateResponse(BaseModel):
+ """StartKnowledgeBaseIndexingJobOutput description"""
+
job: Optional[APIIndexingJob] = None
"""IndexingJob description"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_list_response.py b/src/gradient/types/knowledge_bases/indexing_job_list_response.py
index 371f51bb..374533df 100644
--- a/src/gradient/types/knowledge_bases/indexing_job_list_response.py
+++ b/src/gradient/types/knowledge_bases/indexing_job_list_response.py
@@ -11,6 +11,8 @@
class IndexingJobListResponse(BaseModel):
+ """Indexing jobs"""
+
jobs: Optional[List[APIIndexingJob]] = None
"""The indexing jobs"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py
index 2d6be855..ea24de65 100644
--- a/src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py
+++ b/src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py
@@ -9,5 +9,7 @@
class IndexingJobRetrieveResponse(BaseModel):
+ """GetKnowledgeBaseIndexingJobOutput description"""
+
job: Optional[APIIndexingJob] = None
"""IndexingJob description"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py
index 9fd41764..2622779b 100644
--- a/src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py
+++ b/src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py
@@ -9,5 +9,7 @@
class IndexingJobUpdateCancelResponse(BaseModel):
+ """CancelKnowledgeBaseIndexingJobOutput description"""
+
job: Optional[APIIndexingJob] = None
"""IndexingJob description"""
diff --git a/src/gradient/types/model_list_response.py b/src/gradient/types/model_list_response.py
index 12d95437..48e17809 100644
--- a/src/gradient/types/model_list_response.py
+++ b/src/gradient/types/model_list_response.py
@@ -11,6 +11,8 @@
class ModelListResponse(BaseModel):
+ """A list of models"""
+
links: Optional[APILinks] = None
"""Links to other pages"""
diff --git a/src/gradient/types/models/providers/anthropic_create_response.py b/src/gradient/types/models/providers/anthropic_create_response.py
index 0fbe50bc..0609a486 100644
--- a/src/gradient/types/models/providers/anthropic_create_response.py
+++ b/src/gradient/types/models/providers/anthropic_create_response.py
@@ -9,5 +9,9 @@
class AnthropicCreateResponse(BaseModel):
+ """
+ CreateAnthropicAPIKeyOutput is used to return the newly created Anthropic API key.
+ """
+
api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
"""Anthropic API Key Info"""
diff --git a/src/gradient/types/models/providers/anthropic_delete_response.py b/src/gradient/types/models/providers/anthropic_delete_response.py
index b4fdd978..3ad6a9c6 100644
--- a/src/gradient/types/models/providers/anthropic_delete_response.py
+++ b/src/gradient/types/models/providers/anthropic_delete_response.py
@@ -9,5 +9,7 @@
class AnthropicDeleteResponse(BaseModel):
+ """DeleteAnthropicAPIKeyOutput is used to return the deleted Anthropic API key."""
+
api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
"""Anthropic API Key Info"""
diff --git a/src/gradient/types/models/providers/anthropic_list_agents_response.py b/src/gradient/types/models/providers/anthropic_list_agents_response.py
index a1525275..8e2186cb 100644
--- a/src/gradient/types/models/providers/anthropic_list_agents_response.py
+++ b/src/gradient/types/models/providers/anthropic_list_agents_response.py
@@ -12,6 +12,8 @@
class AnthropicListAgentsResponse(BaseModel):
+ """List of Agents that linked to a specific Anthropic Key"""
+
agents: Optional[List["APIAgent"]] = None
links: Optional[APILinks] = None
diff --git a/src/gradient/types/models/providers/anthropic_list_response.py b/src/gradient/types/models/providers/anthropic_list_response.py
index 24d6547a..458bd311 100644
--- a/src/gradient/types/models/providers/anthropic_list_response.py
+++ b/src/gradient/types/models/providers/anthropic_list_response.py
@@ -11,6 +11,10 @@
class AnthropicListResponse(BaseModel):
+ """
+ ListAnthropicAPIKeysOutput is used to return the list of Anthropic API keys for a specific agent.
+ """
+
api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
"""Api key infos"""
diff --git a/src/gradient/types/models/providers/anthropic_update_response.py b/src/gradient/types/models/providers/anthropic_update_response.py
index 3a6daaea..3e24273c 100644
--- a/src/gradient/types/models/providers/anthropic_update_response.py
+++ b/src/gradient/types/models/providers/anthropic_update_response.py
@@ -9,5 +9,7 @@
class AnthropicUpdateResponse(BaseModel):
+ """UpdateAnthropicAPIKeyOutput is used to return the updated Anthropic API key."""
+
api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
"""Anthropic API Key Info"""
diff --git a/src/gradient/types/models/providers/openai_create_response.py b/src/gradient/types/models/providers/openai_create_response.py
index b2e94766..16aff373 100644
--- a/src/gradient/types/models/providers/openai_create_response.py
+++ b/src/gradient/types/models/providers/openai_create_response.py
@@ -9,5 +9,7 @@
class OpenAICreateResponse(BaseModel):
+ """CreateOpenAIAPIKeyOutput is used to return the newly created OpenAI API key."""
+
api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
"""OpenAI API Key Info"""
diff --git a/src/gradient/types/models/providers/openai_delete_response.py b/src/gradient/types/models/providers/openai_delete_response.py
index e59c89fe..d73a681e 100644
--- a/src/gradient/types/models/providers/openai_delete_response.py
+++ b/src/gradient/types/models/providers/openai_delete_response.py
@@ -9,5 +9,7 @@
class OpenAIDeleteResponse(BaseModel):
+ """DeleteOpenAIAPIKeyOutput is used to return the deleted OpenAI API key."""
+
api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
"""OpenAI API Key Info"""
diff --git a/src/gradient/types/models/providers/openai_list_response.py b/src/gradient/types/models/providers/openai_list_response.py
index 698cd11e..825ac890 100644
--- a/src/gradient/types/models/providers/openai_list_response.py
+++ b/src/gradient/types/models/providers/openai_list_response.py
@@ -11,6 +11,10 @@
class OpenAIListResponse(BaseModel):
+ """
+ ListOpenAIAPIKeysOutput is used to return the list of OpenAI API keys for a specific agent.
+ """
+
api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
"""Api key infos"""
diff --git a/src/gradient/types/models/providers/openai_retrieve_agents_response.py b/src/gradient/types/models/providers/openai_retrieve_agents_response.py
index 717a56cd..f2266f8f 100644
--- a/src/gradient/types/models/providers/openai_retrieve_agents_response.py
+++ b/src/gradient/types/models/providers/openai_retrieve_agents_response.py
@@ -12,6 +12,8 @@
class OpenAIRetrieveAgentsResponse(BaseModel):
+ """List of Agents that are linked to a specific OpenAI Key"""
+
agents: Optional[List["APIAgent"]] = None
links: Optional[APILinks] = None
diff --git a/src/gradient/types/models/providers/openai_update_response.py b/src/gradient/types/models/providers/openai_update_response.py
index ec7a1c94..b94a8efe 100644
--- a/src/gradient/types/models/providers/openai_update_response.py
+++ b/src/gradient/types/models/providers/openai_update_response.py
@@ -9,5 +9,7 @@
class OpenAIUpdateResponse(BaseModel):
+ """UpdateOpenAIAPIKeyOutput is used to return the updated OpenAI API key."""
+
api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
"""OpenAI API Key Info"""
diff --git a/src/gradient/types/nf_initiate_action_params.py b/src/gradient/types/nf_initiate_action_params.py
index a187f56d..4b297210 100644
--- a/src/gradient/types/nf_initiate_action_params.py
+++ b/src/gradient/types/nf_initiate_action_params.py
@@ -11,6 +11,10 @@
"NfsActionResizeParams",
"NfsActionSnapshot",
"NfsActionSnapshotParams",
+ "NfsActionAttach",
+ "NfsActionAttachParams",
+ "NfsActionDetach",
+ "NfsActionDetachParams",
]
@@ -44,4 +48,34 @@ class NfsActionSnapshotParams(TypedDict, total=False):
"""Snapshot name of the NFS share"""
-NfInitiateActionParams: TypeAlias = Union[NfsActionResize, NfsActionSnapshot]
+class NfsActionAttach(TypedDict, total=False):
+ region: Required[str]
+ """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides."""
+
+ type: Required[Literal["resize", "snapshot"]]
+ """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+ params: NfsActionAttachParams
+
+
+class NfsActionAttachParams(TypedDict, total=False):
+ vpc_id: Required[str]
+ """The ID of the VPC to which the NFS share will be attached"""
+
+
+class NfsActionDetach(TypedDict, total=False):
+ region: Required[str]
+ """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides."""
+
+ type: Required[Literal["resize", "snapshot"]]
+ """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+ params: NfsActionDetachParams
+
+
+class NfsActionDetachParams(TypedDict, total=False):
+ vpc_id: Required[str]
+ """The ID of the VPC from which the NFS share will be detached"""
+
+
+NfInitiateActionParams: TypeAlias = Union[NfsActionResize, NfsActionSnapshot, NfsActionAttach, NfsActionDetach]
diff --git a/src/gradient/types/nf_initiate_action_response.py b/src/gradient/types/nf_initiate_action_response.py
index 9f38a4b2..58618450 100644
--- a/src/gradient/types/nf_initiate_action_response.py
+++ b/src/gradient/types/nf_initiate_action_response.py
@@ -9,6 +9,8 @@
class Action(BaseModel):
+ """The action that was submitted."""
+
region_slug: str
"""The DigitalOcean region slug where the resource is located."""
@@ -29,5 +31,7 @@ class Action(BaseModel):
class NfInitiateActionResponse(BaseModel):
+ """Action response of an NFS share."""
+
action: Action
"""The action that was submitted."""
diff --git a/src/gradient/types/nfs/snapshot_list_response.py b/src/gradient/types/nfs/snapshot_list_response.py
index 8a6864dc..a46342ce 100644
--- a/src/gradient/types/nfs/snapshot_list_response.py
+++ b/src/gradient/types/nfs/snapshot_list_response.py
@@ -10,6 +10,8 @@
class Snapshot(BaseModel):
+ """Represents an NFS snapshot."""
+
id: str
"""The unique identifier of the snapshot."""
diff --git a/src/gradient/types/nfs/snapshot_retrieve_response.py b/src/gradient/types/nfs/snapshot_retrieve_response.py
index 2d54d523..12a69d0a 100644
--- a/src/gradient/types/nfs/snapshot_retrieve_response.py
+++ b/src/gradient/types/nfs/snapshot_retrieve_response.py
@@ -10,6 +10,8 @@
class Snapshot(BaseModel):
+ """Represents an NFS snapshot."""
+
id: str
"""The unique identifier of the snapshot."""
diff --git a/src/gradient/types/retrieve_documents_params.py b/src/gradient/types/retrieve_documents_params.py
new file mode 100644
index 00000000..968d0211
--- /dev/null
+++ b/src/gradient/types/retrieve_documents_params.py
@@ -0,0 +1,73 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["RetrieveDocumentsParams", "Filters", "FiltersMust", "FiltersMustNot", "FiltersShould"]
+
+
+class RetrieveDocumentsParams(TypedDict, total=False):
+ num_results: Required[int]
+ """Number of results to return"""
+
+ query: Required[str]
+ """The search query text"""
+
+ alpha: float
+ """Weight for hybrid search (0-1):
+
+ - 0 = pure keyword search (BM25)
+ - 1 = pure vector search (default)
+ - 0.5 = balanced hybrid search
+ """
+
+ filters: Filters
+ """Metadata filters to apply to the search"""
+
+
+class FiltersMust(TypedDict, total=False):
+ field: Required[str]
+ """Metadata field name"""
+
+ operator: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "not_in", "contains"]]
+ """Comparison operator"""
+
+ value: Required[Union[str, float, bool, SequenceNotStr[str]]]
+ """Value to compare against (type depends on field)"""
+
+
+class FiltersMustNot(TypedDict, total=False):
+ field: Required[str]
+ """Metadata field name"""
+
+ operator: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "not_in", "contains"]]
+ """Comparison operator"""
+
+ value: Required[Union[str, float, bool, SequenceNotStr[str]]]
+ """Value to compare against (type depends on field)"""
+
+
+class FiltersShould(TypedDict, total=False):
+ field: Required[str]
+ """Metadata field name"""
+
+ operator: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "not_in", "contains"]]
+ """Comparison operator"""
+
+ value: Required[Union[str, float, bool, SequenceNotStr[str]]]
+ """Value to compare against (type depends on field)"""
+
+
+class Filters(TypedDict, total=False):
+ must: Iterable[FiltersMust]
+ """All conditions must match (AND)"""
+
+ must_not: Iterable[FiltersMustNot]
+ """No conditions should match (NOT)"""
+
+ should: Iterable[FiltersShould]
+ """At least one condition must match (OR)"""
diff --git a/src/gradient/types/retrieve_documents_response.py b/src/gradient/types/retrieve_documents_response.py
new file mode 100644
index 00000000..79f3e8eb
--- /dev/null
+++ b/src/gradient/types/retrieve_documents_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List
+
+from .._models import BaseModel
+
+__all__ = ["RetrieveDocumentsResponse", "Result"]
+
+
+class Result(BaseModel):
+ metadata: Dict[str, object]
+ """Metadata associated with the document"""
+
+ text_content: str
+ """The text content of the document chunk"""
+
+
+class RetrieveDocumentsResponse(BaseModel):
+ results: List[Result]
+ """Array of retrieved document chunks"""
+
+ total_results: int
+ """Number of results returned"""
diff --git a/src/gradient/types/shared/action_link.py b/src/gradient/types/shared/action_link.py
index 78aec9ff..143c66e3 100644
--- a/src/gradient/types/shared/action_link.py
+++ b/src/gradient/types/shared/action_link.py
@@ -8,6 +8,8 @@
class ActionLink(BaseModel):
+ """The linked actions can be used to check the status of a Droplet's create event."""
+
id: Optional[int] = None
"""A unique numeric ID that can be used to identify and reference an action."""
diff --git a/src/gradient/types/shared/api_links.py b/src/gradient/types/shared/api_links.py
index 24b19cfe..ce9be06b 100644
--- a/src/gradient/types/shared/api_links.py
+++ b/src/gradient/types/shared/api_links.py
@@ -8,6 +8,8 @@
class Pages(BaseModel):
+ """Information about how to reach other pages"""
+
first: Optional[str] = None
"""First page"""
@@ -22,5 +24,7 @@ class Pages(BaseModel):
class APILinks(BaseModel):
+ """Links to other pages"""
+
pages: Optional[Pages] = None
"""Information about how to reach other pages"""
diff --git a/src/gradient/types/shared/api_meta.py b/src/gradient/types/shared/api_meta.py
index dc267527..1a8cdede 100644
--- a/src/gradient/types/shared/api_meta.py
+++ b/src/gradient/types/shared/api_meta.py
@@ -8,6 +8,8 @@
class APIMeta(BaseModel):
+ """Meta information about the data set"""
+
page: Optional[int] = None
"""The current page"""
diff --git a/src/gradient/types/shared/chat_completion_chunk.py b/src/gradient/types/shared/chat_completion_chunk.py
index ff705bf4..53d5a563 100644
--- a/src/gradient/types/shared/chat_completion_chunk.py
+++ b/src/gradient/types/shared/chat_completion_chunk.py
@@ -18,6 +18,8 @@
class ChoiceDeltaToolCallFunction(BaseModel):
+ """A chunk of a function that the model called."""
+
arguments: Optional[str] = None
"""
The arguments to call the function with, as generated by the model in JSON
@@ -44,6 +46,8 @@ class ChoiceDeltaToolCall(BaseModel):
class ChoiceDelta(BaseModel):
+ """A chat completion delta generated by streamed model responses."""
+
content: Optional[str] = None
"""The contents of the chunk message."""
@@ -60,6 +64,8 @@ class ChoiceDelta(BaseModel):
class ChoiceLogprobs(BaseModel):
+ """Log probability information for the choice."""
+
content: Optional[List[ChatCompletionTokenLogprob]] = None
"""A list of message content tokens with log probability information."""
@@ -87,6 +93,11 @@ class Choice(BaseModel):
class ChatCompletionChunk(BaseModel):
+ """
+ Represents a streamed chunk of a chat completion response returned
+ by the model, based on the provided input.
+ """
+
id: str
"""A unique identifier for the chat completion. Each chunk has the same ID."""
diff --git a/src/gradient/types/shared/completion_usage.py b/src/gradient/types/shared/completion_usage.py
index a2012eef..79ce64ee 100644
--- a/src/gradient/types/shared/completion_usage.py
+++ b/src/gradient/types/shared/completion_usage.py
@@ -6,6 +6,8 @@
class CompletionUsage(BaseModel):
+ """Usage statistics for the completion request."""
+
completion_tokens: int
"""Number of tokens in the generated completion."""
diff --git a/src/gradient/types/shared/droplet.py b/src/gradient/types/shared/droplet.py
index 9d2bb17c..4ae5bae4 100644
--- a/src/gradient/types/shared/droplet.py
+++ b/src/gradient/types/shared/droplet.py
@@ -19,6 +19,11 @@
class Networks(BaseModel):
+ """The details of the network that are configured for the Droplet instance.
+
+ This is an object that contains keys for IPv4 and IPv6. The value of each of these is an array that contains objects describing an individual IP resource allocated to the Droplet. These will define attributes like the IP address, netmask, and gateway of the specific network depending on the type of network it is.
+ """
+
v4: Optional[List[NetworkV4]] = None
v6: Optional[List[NetworkV6]] = None
diff --git a/src/gradient/types/shared/gpu_info.py b/src/gradient/types/shared/gpu_info.py
index a285dd23..7f9d7329 100644
--- a/src/gradient/types/shared/gpu_info.py
+++ b/src/gradient/types/shared/gpu_info.py
@@ -16,6 +16,10 @@ class Vram(BaseModel):
class GPUInfo(BaseModel):
+ """
+ An object containing information about the GPU capabilities of Droplets created with this size.
+ """
+
count: Optional[int] = None
"""The number of GPUs allocated to the Droplet."""
diff --git a/src/gradient/types/shared/image_gen_completed_event.py b/src/gradient/types/shared/image_gen_completed_event.py
index cbb282e5..de44188f 100644
--- a/src/gradient/types/shared/image_gen_completed_event.py
+++ b/src/gradient/types/shared/image_gen_completed_event.py
@@ -8,6 +8,8 @@
class UsageInputTokensDetails(BaseModel):
+ """The input tokens detailed information for the image generation."""
+
image_tokens: int
"""The number of image tokens in the input prompt."""
@@ -16,6 +18,8 @@ class UsageInputTokensDetails(BaseModel):
class Usage(BaseModel):
+ """For `gpt-image-1` only, the token usage information for the image generation."""
+
input_tokens: int
"""The number of tokens (images and text) in the input prompt."""
@@ -30,6 +34,8 @@ class Usage(BaseModel):
class ImageGenCompletedEvent(BaseModel):
+ """Emitted when image generation has completed and the final image is available."""
+
b64_json: str
"""Base64-encoded image data, suitable for rendering as an image."""
diff --git a/src/gradient/types/shared/image_gen_partial_image_event.py b/src/gradient/types/shared/image_gen_partial_image_event.py
index 4cc704b2..e2740e08 100644
--- a/src/gradient/types/shared/image_gen_partial_image_event.py
+++ b/src/gradient/types/shared/image_gen_partial_image_event.py
@@ -8,6 +8,8 @@
class ImageGenPartialImageEvent(BaseModel):
+ """Emitted when a partial image is available during image generation streaming."""
+
b64_json: str
"""Base64-encoded partial image data, suitable for rendering as an image."""
diff --git a/src/gradient/types/shared/kernel.py b/src/gradient/types/shared/kernel.py
index 78a63427..79091d33 100644
--- a/src/gradient/types/shared/kernel.py
+++ b/src/gradient/types/shared/kernel.py
@@ -8,6 +8,15 @@
class Kernel(BaseModel):
+ """
+ **Note**: All Droplets created after March 2017 use internal kernels by default.
+ These Droplets will have this attribute set to `null`.
+
+ The current [kernel](https://docs.digitalocean.com/products/droplets/how-to/kernel/)
+ for Droplets with externally managed kernels. This will initially be set to
+ the kernel of the base image when the Droplet is created.
+ """
+
id: Optional[int] = None
"""A unique number used to identify and reference a specific kernel."""
diff --git a/src/gradient/types/shared/meta_properties.py b/src/gradient/types/shared/meta_properties.py
index a78a64d6..b7d703df 100644
--- a/src/gradient/types/shared/meta_properties.py
+++ b/src/gradient/types/shared/meta_properties.py
@@ -8,5 +8,7 @@
class MetaProperties(BaseModel):
+ """Information about the response itself."""
+
total: Optional[int] = None
"""Number of objects returned by the request."""
diff --git a/tests/api_resources/test_nfs.py b/tests/api_resources/test_nfs.py
index f2749330..6969ee96 100644
--- a/tests/api_resources/test_nfs.py
+++ b/tests/api_resources/test_nfs.py
@@ -313,6 +313,128 @@ def test_path_params_initiate_action_overload_2(self, client: Gradient) -> None:
type="resize",
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_3(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_3(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ params={"vpc_id": "vpc-id-123"},
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_3(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_3(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_3(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ region="atl1",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_4(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_4(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ params={"vpc_id": "vpc-id-123"},
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_4(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_4(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_4(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ region="atl1",
+ type="resize",
+ )
+
class TestAsyncNfs:
parametrize = pytest.mark.parametrize(
@@ -609,3 +731,125 @@ async def test_path_params_initiate_action_overload_2(self, async_client: AsyncG
region="atl1",
type="resize",
)
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_3(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_3(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ params={"vpc_id": "vpc-id-123"},
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_3(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_3(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_3(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ region="atl1",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_4(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_4(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ params={"vpc_id": "vpc-id-123"},
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_4(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_4(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_4(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ region="atl1",
+ type="resize",
+ )
diff --git a/tests/api_resources/test_retrieve.py b/tests/api_resources/test_retrieve.py
new file mode 100644
index 00000000..167d2a96
--- /dev/null
+++ b/tests/api_resources/test_retrieve.py
@@ -0,0 +1,192 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import RetrieveDocumentsResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRetrieve:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_documents(self, client: Gradient) -> None:
+ retrieve = client.retrieve.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_documents_with_all_params(self, client: Gradient) -> None:
+ retrieve = client.retrieve.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ alpha=0.75,
+ filters={
+ "must": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ "must_not": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ "should": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ },
+ )
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_documents(self, client: Gradient) -> None:
+ response = client.retrieve.with_raw_response.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ retrieve = response.parse()
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_documents(self, client: Gradient) -> None:
+ with client.retrieve.with_streaming_response.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ retrieve = response.parse()
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_documents(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_id` but received ''"):
+ client.retrieve.with_raw_response.documents(
+ knowledge_base_id="",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+
+
+class TestAsyncRetrieve:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_documents(self, async_client: AsyncGradient) -> None:
+ retrieve = await async_client.retrieve.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_documents_with_all_params(self, async_client: AsyncGradient) -> None:
+ retrieve = await async_client.retrieve.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ alpha=0.75,
+ filters={
+ "must": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ "must_not": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ "should": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ },
+ )
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_documents(self, async_client: AsyncGradient) -> None:
+ response = await async_client.retrieve.with_raw_response.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ retrieve = await response.parse()
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_documents(self, async_client: AsyncGradient) -> None:
+ async with async_client.retrieve.with_streaming_response.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ retrieve = await response.parse()
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_documents(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_id` but received ''"):
+ await async_client.retrieve.with_raw_response.documents(
+ knowledge_base_id="",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )