diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
new file mode 100644
index 00000000..2bc5b4b2
--- /dev/null
+++ b/.github/workflows/publish-pypi.yml
@@ -0,0 +1,31 @@
+# This workflow is triggered when a GitHub release is created.
+# It can also be run manually to re-publish to PyPI in case it failed for some reason.
+# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml
+name: Publish PyPI
+on:
+ workflow_dispatch:
+
+ release:
+ types: [published]
+
+jobs:
+ publish:
+ name: publish
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install Rye
+ run: |
+ curl -sSf https://rye.astral.sh/get | bash
+ echo "$HOME/.rye/shims" >> $GITHUB_PATH
+ env:
+ RYE_VERSION: '0.44.0'
+ RYE_INSTALL_OPTION: '--yes'
+
+ - name: Publish to PyPI
+ run: |
+ bash ./bin/publish-pypi
+ env:
+ PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
new file mode 100644
index 00000000..0f23cbc4
--- /dev/null
+++ b/.github/workflows/release-doctor.yml
@@ -0,0 +1,21 @@
+name: Release Doctor
+on:
+ pull_request:
+ branches:
+ - main
+ workflow_dispatch:
+
+jobs:
+ release_doctor:
+ name: release doctor
+ runs-on: ubuntu-latest
+  if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Check release environment
+ run: |
+ bash ./bin/check-release-environment
+ env:
+ PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
new file mode 100644
index 00000000..ba6c3483
--- /dev/null
+++ b/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ ".": "0.1.0-alpha.1"
+}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 84a850f9..6b91fe37 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 126
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-bdf24159c6ebb5402d6c05a5165cb1501dc37cf6c664baa9eb318efb0f89dddd.yml
-openapi_spec_hash: 686329a97002025d118dc2367755c18d
-config_hash: 39a1554af43cd406e37b5ed5c943649c
+configured_endpoints: 4
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml
+openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a
+config_hash: 2da74b81015f4ef6cad3a0bcb9025834
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..4b97798c
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,18 @@
+# Changelog
+
+## 0.1.0-alpha.1 (2025-06-04)
+
+Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/digitalocean/genai-python/compare/v0.0.1-alpha.0...v0.1.0-alpha.1)
+
+### Features
+
+* **api:** update via SDK Studio ([691923d](https://github.com/digitalocean/genai-python/commit/691923d9f60b5ebe5dc34c8227273d06448945e8))
+* **client:** add follow_redirects request option ([5a6d480](https://github.com/digitalocean/genai-python/commit/5a6d480aef6d4c5084f484d1b69e6f49568a8caf))
+
+
+### Chores
+
+* **docs:** remove reference to rye shell ([29febe9](https://github.com/digitalocean/genai-python/commit/29febe9affcb0ae41ec69f8aea3ae6ef53967537))
+* **docs:** remove unnecessary param examples ([35ec489](https://github.com/digitalocean/genai-python/commit/35ec48915a8bd750060634208e91bd98c905b53c))
+* update SDK settings ([f032621](https://github.com/digitalocean/genai-python/commit/f03262136aa46e9325ac2fae785bf48a56f0127b))
+* update SDK settings ([b2cf700](https://github.com/digitalocean/genai-python/commit/b2cf700a0419f7d6e3f23ee02747fe7766a05f98))
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 79f5523c..7d5d60a7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -17,8 +17,7 @@ $ rye sync --all-features
You can then run scripts using `rye run python script.py` or by activating the virtual environment:
```sh
-$ rye shell
-# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work
+# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work
$ source .venv/bin/activate
# now you can omit the `rye run` prefix
@@ -63,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g
To install via git:
```sh
-$ pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git
+$ pip install git+ssh://git@github.com/digitalocean/genai-python.git
```
Alternatively, you can build from source and install the wheel file:
@@ -121,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel
### Publish with a GitHub workflow
-You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up.
+You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires an organization or repository secret to be set up.
### Publish manually
diff --git a/README.md b/README.md
index b9fcd7e8..bdaea964 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# Digitalocean Genai SDK Python API library
-[](https://pypi.org/project/digitalocean_genai_sdk/)
+[](https://pypi.org/project/do-genai/)
The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+
application. The library includes type definitions for all request params and response fields,
@@ -15,13 +15,10 @@ The REST API documentation can be found on [help.openai.com](https://help.openai
## Installation
```sh
-# install from this staging repo
-pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git
+# install from PyPI
+pip install --pre do-genai
```
-> [!NOTE]
-> Once this package is [published to PyPI](https://app.stainless.com/docs/guides/publish), this will become: `pip install --pre digitalocean_genai_sdk`
-
## Usage
The full API of this library can be found in [api.md](api.md).
@@ -36,8 +33,16 @@ client = DigitaloceanGenaiSDK(
), # This is the default and can be omitted
)
-assistants = client.assistants.list()
-print(assistants.first_id)
+create_response = client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+)
+print(create_response.id)
```
While you can provide an `api_key` keyword argument,
@@ -62,8 +67,16 @@ client = AsyncDigitaloceanGenaiSDK(
async def main() -> None:
- assistants = await client.assistants.list()
- print(assistants.first_id)
+ create_response = await client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
+ print(create_response.id)
asyncio.run(main())
@@ -89,43 +102,19 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK
client = DigitaloceanGenaiSDK()
-assistant_object = client.assistants.create(
- model="gpt-4o",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
-)
-print(assistant_object.tool_resources)
-```
-
-## File uploads
-
-Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
-
-```python
-from pathlib import Path
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
-
-client = DigitaloceanGenaiSDK()
-
-client.audio.transcribe_audio(
- file=Path("/path/to/file"),
- model="gpt-4o-transcribe",
+create_response = client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream_options={},
)
+print(create_response.stream_options)
```
-The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
-
## Handling errors
When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `digitalocean_genai_sdk.APIConnectionError` is raised.
@@ -142,7 +131,15 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK
client = DigitaloceanGenaiSDK()
try:
- client.assistants.list()
+ client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
except digitalocean_genai_sdk.APIConnectionError as e:
print("The server could not be reached")
print(e.__cause__) # an underlying Exception, likely raised within httpx.
@@ -185,7 +182,15 @@ client = DigitaloceanGenaiSDK(
)
# Or, configure per-request:
-client.with_options(max_retries=5).assistants.list()
+client.with_options(max_retries=5).chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+)
```
### Timeouts
@@ -208,7 +213,15 @@ client = DigitaloceanGenaiSDK(
)
# Override per-request:
-client.with_options(timeout=5.0).assistants.list()
+client.with_options(timeout=5.0).chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+)
```
On timeout, an `APITimeoutError` is thrown.
@@ -249,16 +262,22 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
from digitalocean_genai_sdk import DigitaloceanGenaiSDK
client = DigitaloceanGenaiSDK()
-response = client.assistants.with_raw_response.list()
+response = client.chat.completions.with_raw_response.create(
+ messages=[{
+ "content": "string",
+ "role": "system",
+ }],
+ model="llama3-8b-instruct",
+)
print(response.headers.get('X-My-Header'))
-assistant = response.parse() # get the object that `assistants.list()` would have returned
-print(assistant.first_id)
+completion = response.parse() # get the object that `chat.completions.create()` would have returned
+print(completion.id)
```
-These methods return an [`APIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) object.
-The async client returns an [`AsyncAPIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
#### `.with_streaming_response`
@@ -267,7 +286,15 @@ The above interface eagerly reads the full response body when you make the reque
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
```python
-with client.assistants.with_streaming_response.list() as response:
+with client.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+) as response:
print(response.headers.get("X-My-Header"))
for line in response.iter_lines():
@@ -362,7 +389,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con
We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
-We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/issues) with questions, bugs, or suggestions.
+We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions.
### Determining the installed version
diff --git a/api.md b/api.md
index daea5075..90a1a7d9 100644
--- a/api.md
+++ b/api.md
@@ -1,65 +1,3 @@
-# Assistants
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import (
- AssistantObject,
- AssistantSupportedModels,
- AssistantToolsCode,
- AssistantToolsFileSearch,
- AssistantToolsFunction,
- AssistantsAPIResponseFormatOption,
- FileSearchRanker,
- FunctionObject,
- ReasoningEffort,
- AssistantListResponse,
- AssistantDeleteResponse,
-)
-```
-
-Methods:
-
-- client.assistants.create(\*\*params) -> AssistantObject
-- client.assistants.retrieve(assistant_id) -> AssistantObject
-- client.assistants.update(assistant_id, \*\*params) -> AssistantObject
-- client.assistants.list(\*\*params) -> AssistantListResponse
-- client.assistants.delete(assistant_id) -> AssistantDeleteResponse
-
-# Audio
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import (
- TranscriptionSegment,
- VoiceIDsShared,
- AudioTranscribeAudioResponse,
- AudioTranslateAudioResponse,
-)
-```
-
-Methods:
-
-- client.audio.generate_speech(\*\*params) -> BinaryAPIResponse
-- client.audio.transcribe_audio(\*\*params) -> AudioTranscribeAudioResponse
-- client.audio.translate_audio(\*\*params) -> AudioTranslateAudioResponse
-
-# Batches
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import Batch, BatchListResponse
-```
-
-Methods:
-
-- client.batches.create(\*\*params) -> Batch
-- client.batches.retrieve(batch_id) -> Batch
-- client.batches.list(\*\*params) -> BatchListResponse
-- client.batches.cancel(batch_id) -> Batch
-
# Chat
## Completions
@@ -70,48 +8,25 @@ Types:
from digitalocean_genai_sdk.types.chat import (
CreateModelProperties,
CreateResponse,
- MessageToolCall,
- ModelIDsShared,
RequestMessageContentPartText,
- ResponseFormatJsonObject,
- ResponseFormatJsonSchema,
- ResponseFormatText,
ResponseMessage,
TokenLogprob,
Usage,
- WebSearchContextSize,
- WebSearchLocation,
- CompletionListResponse,
- CompletionDeleteResponse,
- CompletionListMessagesResponse,
)
```
Methods:
- client.chat.completions.create(\*\*params) -> CreateResponse
-- client.chat.completions.retrieve(completion_id) -> CreateResponse
-- client.chat.completions.update(completion_id, \*\*params) -> CreateResponse
-- client.chat.completions.list(\*\*params) -> CompletionListResponse
-- client.chat.completions.delete(completion_id) -> CompletionDeleteResponse
-- client.chat.completions.list_messages(completion_id, \*\*params) -> CompletionListMessagesResponse
# Completions
Types:
```python
-from digitalocean_genai_sdk.types import (
- ChatCompletionStreamOptions,
- StopConfiguration,
- CompletionCreateResponse,
-)
+from digitalocean_genai_sdk.types import ChatCompletionStreamOptions, StopConfiguration
```
-Methods:
-
-- client.completions.create(\*\*params) -> CompletionCreateResponse
-
# Embeddings
Types:
@@ -124,504 +39,23 @@ Methods:
- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse
-# Files
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import (
- OpenAIFile,
- FileListResponse,
- FileDeleteResponse,
- FileRetrieveContentResponse,
-)
-```
-
-Methods:
-
-- client.files.retrieve(file_id) -> OpenAIFile
-- client.files.list(\*\*params) -> FileListResponse
-- client.files.delete(file_id) -> FileDeleteResponse
-- client.files.retrieve_content(file_id) -> str
-- client.files.upload(\*\*params) -> OpenAIFile
-
-# FineTuning
-
-## Checkpoints
-
-### Permissions
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.fine_tuning.checkpoints import (
- ListFineTuningCheckpointPermission,
- PermissionDeleteResponse,
-)
-```
-
-Methods:
-
-- client.fine_tuning.checkpoints.permissions.create(permission_id, \*\*params) -> ListFineTuningCheckpointPermission
-- client.fine_tuning.checkpoints.permissions.retrieve(permission_id, \*\*params) -> ListFineTuningCheckpointPermission
-- client.fine_tuning.checkpoints.permissions.delete(permission_id) -> PermissionDeleteResponse
-
-## Jobs
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.fine_tuning import FineTuneMethod, FineTuningJob, JobListResponse
-```
-
-Methods:
-
-- client.fine_tuning.jobs.create(\*\*params) -> FineTuningJob
-- client.fine_tuning.jobs.retrieve(fine_tuning_job_id) -> FineTuningJob
-- client.fine_tuning.jobs.list(\*\*params) -> JobListResponse
-- client.fine_tuning.jobs.cancel(fine_tuning_job_id) -> FineTuningJob
-
-### Checkpoints
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.fine_tuning.jobs import CheckpointRetrieveResponse
-```
-
-Methods:
-
-- client.fine_tuning.jobs.checkpoints.retrieve(fine_tuning_job_id, \*\*params) -> CheckpointRetrieveResponse
-
-### Events
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.fine_tuning.jobs import EventRetrieveResponse
-```
-
-Methods:
-
-- client.fine_tuning.jobs.events.retrieve(fine_tuning_job_id, \*\*params) -> EventRetrieveResponse
-
-# Images
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import ImagesResponse
-```
-
-Methods:
-
-- client.images.create_edit(\*\*params) -> ImagesResponse
-- client.images.create_generation(\*\*params) -> ImagesResponse
-- client.images.create_variation(\*\*params) -> ImagesResponse
-
# Models
Types:
```python
-from digitalocean_genai_sdk.types import Model, ModelListResponse, ModelDeleteResponse
+from digitalocean_genai_sdk.types import Model, ModelListResponse
```
Methods:
- client.models.retrieve(model) -> Model
- client.models.list() -> ModelListResponse
-- client.models.delete(model) -> ModelDeleteResponse
-
-# Moderations
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import ModerationClassifyResponse
-```
-
-Methods:
-
-- client.moderations.classify(\*\*params) -> ModerationClassifyResponse
-
-# Organization
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import (
- AuditLogActorUser,
- AuditLogEventType,
- UsageResponse,
- OrganizationListAuditLogsResponse,
-)
-```
-
-Methods:
-
-- client.organization.get_costs(\*\*params) -> UsageResponse
-- client.organization.list_audit_logs(\*\*params) -> OrganizationListAuditLogsResponse
-
-## AdminAPIKeys
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.organization import (
- AdminAPIKey,
- AdminAPIKeyListResponse,
- AdminAPIKeyDeleteResponse,
-)
-```
-
-Methods:
-
-- client.organization.admin_api_keys.create(\*\*params) -> AdminAPIKey
-- client.organization.admin_api_keys.retrieve(key_id) -> AdminAPIKey
-- client.organization.admin_api_keys.list(\*\*params) -> AdminAPIKeyListResponse
-- client.organization.admin_api_keys.delete(key_id) -> AdminAPIKeyDeleteResponse
-
-## Invites
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.organization import (
- Invite,
- InviteListResponse,
- InviteDeleteResponse,
-)
-```
-
-Methods:
-
-- client.organization.invites.create(\*\*params) -> Invite
-- client.organization.invites.retrieve(invite_id) -> Invite
-- client.organization.invites.list(\*\*params) -> InviteListResponse
-- client.organization.invites.delete(invite_id) -> InviteDeleteResponse
-
-## Projects
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.organization import Project, ProjectListResponse
-```
-
-Methods:
-
-- client.organization.projects.create(\*\*params) -> Project
-- client.organization.projects.retrieve(project_id) -> Project
-- client.organization.projects.update(project_id, \*\*params) -> Project
-- client.organization.projects.list(\*\*params) -> ProjectListResponse
-- client.organization.projects.archive(project_id) -> Project
-
-### APIKeys
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.organization.projects import (
- APIKey,
- APIKeyListResponse,
- APIKeyDeleteResponse,
-)
-```
-
-Methods:
-
-- client.organization.projects.api_keys.retrieve(key_id, \*, project_id) -> APIKey
-- client.organization.projects.api_keys.list(project_id, \*\*params) -> APIKeyListResponse
-- client.organization.projects.api_keys.delete(key_id, \*, project_id) -> APIKeyDeleteResponse
-
-### RateLimits
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.organization.projects import RateLimit, RateLimitListResponse
-```
-
-Methods:
-
-- client.organization.projects.rate_limits.update(rate_limit_id, \*, project_id, \*\*params) -> RateLimit
-- client.organization.projects.rate_limits.list(project_id, \*\*params) -> RateLimitListResponse
-
-### ServiceAccounts
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.organization.projects import (
- ServiceAccount,
- ServiceAccountCreateResponse,
- ServiceAccountListResponse,
- ServiceAccountDeleteResponse,
-)
-```
-
-Methods:
-
-- client.organization.projects.service_accounts.create(project_id, \*\*params) -> ServiceAccountCreateResponse
-- client.organization.projects.service_accounts.retrieve(service_account_id, \*, project_id) -> ServiceAccount
-- client.organization.projects.service_accounts.list(project_id, \*\*params) -> ServiceAccountListResponse
-- client.organization.projects.service_accounts.delete(service_account_id, \*, project_id) -> ServiceAccountDeleteResponse
-
-### Users
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.organization.projects import (
- ProjectUser,
- UserListResponse,
- UserDeleteResponse,
-)
-```
-
-Methods:
-
-- client.organization.projects.users.retrieve(user_id, \*, project_id) -> ProjectUser
-- client.organization.projects.users.update(user_id, \*, project_id, \*\*params) -> ProjectUser
-- client.organization.projects.users.list(project_id, \*\*params) -> UserListResponse
-- client.organization.projects.users.delete(user_id, \*, project_id) -> UserDeleteResponse
-- client.organization.projects.users.add(project_id, \*\*params) -> ProjectUser
-
-## Usage
-
-Methods:
-
-- client.organization.usage.audio_speeches(\*\*params) -> UsageResponse
-- client.organization.usage.audio_transcriptions(\*\*params) -> UsageResponse
-- client.organization.usage.code_interpreter_sessions(\*\*params) -> UsageResponse
-- client.organization.usage.completions(\*\*params) -> UsageResponse
-- client.organization.usage.embeddings(\*\*params) -> UsageResponse
-- client.organization.usage.images(\*\*params) -> UsageResponse
-- client.organization.usage.moderations(\*\*params) -> UsageResponse
-- client.organization.usage.vector_stores(\*\*params) -> UsageResponse
-
-## Users
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.organization import (
- OrganizationUser,
- UserListResponse,
- UserDeleteResponse,
-)
-```
-
-Methods:
-
-- client.organization.users.retrieve(user_id) -> OrganizationUser
-- client.organization.users.update(user_id, \*\*params) -> OrganizationUser
-- client.organization.users.list(\*\*params) -> UserListResponse
-- client.organization.users.delete(user_id) -> UserDeleteResponse
-
-# Realtime
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import (
- RealtimeCreateSessionResponse,
- RealtimeCreateTranscriptionSessionResponse,
-)
-```
-
-Methods:
-
-- client.realtime.create_session(\*\*params) -> RealtimeCreateSessionResponse
-- client.realtime.create_transcription_session(\*\*params) -> RealtimeCreateTranscriptionSessionResponse
# Responses
Types:
```python
-from digitalocean_genai_sdk.types import (
- ComputerToolCall,
- ComputerToolCallOutput,
- ComputerToolCallSafetyCheck,
- FileSearchToolCall,
- FunctionToolCall,
- FunctionToolCallOutput,
- Includable,
- InputContent,
- InputMessage,
- ModelResponseProperties,
- OutputMessage,
- ReasoningItem,
- Response,
- ResponseProperties,
- WebSearchToolCall,
- ResponseListInputItemsResponse,
-)
+from digitalocean_genai_sdk.types import ModelResponseProperties
```
-
-Methods:
-
-- client.responses.create(\*\*params) -> Response
-- client.responses.retrieve(response_id, \*\*params) -> Response
-- client.responses.delete(response_id) -> None
-- client.responses.list_input_items(response_id, \*\*params) -> ResponseListInputItemsResponse
-
-# Threads
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import CreateThreadRequest, ThreadObject, ThreadDeleteResponse
-```
-
-Methods:
-
-- client.threads.create(\*\*params) -> ThreadObject
-- client.threads.retrieve(thread_id) -> ThreadObject
-- client.threads.update(thread_id, \*\*params) -> ThreadObject
-- client.threads.delete(thread_id) -> ThreadDeleteResponse
-
-## Runs
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.threads import (
- AssistantsAPIToolChoiceOption,
- RunObject,
- TruncationObject,
- RunListResponse,
-)
-```
-
-Methods:
-
-- client.threads.runs.create(\*\*params) -> RunObject
-- client.threads.runs.retrieve(run_id, \*, thread_id) -> RunObject
-- client.threads.runs.update(run_id, \*, thread_id, \*\*params) -> RunObject
-- client.threads.runs.list(thread_id, \*\*params) -> RunListResponse
-- client.threads.runs.cancel(run_id, \*, thread_id) -> RunObject
-- client.threads.runs.create_run(thread_id, \*\*params) -> RunObject
-- client.threads.runs.submit_tool_outputs(run_id, \*, thread_id, \*\*params) -> RunObject
-
-### Steps
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.threads.runs import RunStepObject, StepListResponse
-```
-
-Methods:
-
-- client.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id, \*\*params) -> RunStepObject
-- client.threads.runs.steps.list(run_id, \*, thread_id, \*\*params) -> StepListResponse
-
-## Messages
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.threads import (
- AssistantToolsFileSearchTypeOnly,
- CreateMessageRequest,
- MessageContentImageFileObject,
- MessageContentImageURLObject,
- MessageObject,
- MessageListResponse,
- MessageDeleteResponse,
-)
-```
-
-Methods:
-
-- client.threads.messages.create(thread_id, \*\*params) -> MessageObject
-- client.threads.messages.retrieve(message_id, \*, thread_id) -> MessageObject
-- client.threads.messages.update(message_id, \*, thread_id, \*\*params) -> MessageObject
-- client.threads.messages.list(thread_id, \*\*params) -> MessageListResponse
-- client.threads.messages.delete(message_id, \*, thread_id) -> MessageDeleteResponse
-
-# Uploads
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import Upload, UploadAddPartResponse
-```
-
-Methods:
-
-- client.uploads.create(\*\*params) -> Upload
-- client.uploads.add_part(upload_id, \*\*params) -> UploadAddPartResponse
-- client.uploads.cancel(upload_id) -> Upload
-- client.uploads.complete(upload_id, \*\*params) -> Upload
-
-# VectorStores
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types import (
- AutoChunkingStrategyRequestParam,
- ComparisonFilter,
- CompoundFilter,
- StaticChunkingStrategy,
- StaticChunkingStrategyRequestParam,
- VectorStoreExpirationAfter,
- VectorStoreObject,
- VectorStoreListResponse,
- VectorStoreDeleteResponse,
- VectorStoreSearchResponse,
-)
-```
-
-Methods:
-
-- client.vector_stores.create(\*\*params) -> VectorStoreObject
-- client.vector_stores.retrieve(vector_store_id) -> VectorStoreObject
-- client.vector_stores.update(vector_store_id, \*\*params) -> VectorStoreObject
-- client.vector_stores.list(\*\*params) -> VectorStoreListResponse
-- client.vector_stores.delete(vector_store_id) -> VectorStoreDeleteResponse
-- client.vector_stores.search(vector_store_id, \*\*params) -> VectorStoreSearchResponse
-
-## FileBatches
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.vector_stores import (
- ChunkingStrategyRequestParam,
- ListVectorStoreFilesResponse,
- VectorStoreFileBatchObject,
-)
-```
-
-Methods:
-
-- client.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatchObject
-- client.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatchObject
-- client.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatchObject
-- client.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> ListVectorStoreFilesResponse
-
-## Files
-
-Types:
-
-```python
-from digitalocean_genai_sdk.types.vector_stores import (
- VectorStoreFileObject,
- FileDeleteResponse,
- FileRetrieveContentResponse,
-)
-```
-
-Methods:
-
-- client.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFileObject
-- client.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFileObject
-- client.vector_stores.files.update(file_id, \*, vector_store_id, \*\*params) -> VectorStoreFileObject
-- client.vector_stores.files.list(vector_store_id, \*\*params) -> ListVectorStoreFilesResponse
-- client.vector_stores.files.delete(file_id, \*, vector_store_id) -> FileDeleteResponse
-- client.vector_stores.files.retrieve_content(file_id, \*, vector_store_id) -> FileRetrieveContentResponse
diff --git a/bin/check-release-environment b/bin/check-release-environment
new file mode 100644
index 00000000..9e89a88a
--- /dev/null
+++ b/bin/check-release-environment
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+errors=()
+
+if [ -z "${PYPI_TOKEN}" ]; then
+  errors+=("The DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN (or PYPI_TOKEN) secret has not been set. Please set it in either this repository's secrets or your organization secrets.")
+fi
+
+lenErrors=${#errors[@]}
+
+if [[ lenErrors -gt 0 ]]; then
+ echo -e "Found the following errors in the release environment:\n"
+
+ for error in "${errors[@]}"; do
+ echo -e "- $error\n"
+ done
+
+ exit 1
+fi
+
+echo "The environment is ready to push releases!"
diff --git a/pyproject.toml b/pyproject.toml
index 33ffc05d..ae9ff037 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
-name = "digitalocean_genai_sdk"
-version = "0.0.1-alpha.0"
+name = "do-genai"
+version = "0.1.0-alpha.1"
description = "The official Python library for the digitalocean-genai-sdk API"
dynamic = ["readme"]
license = "MIT"
@@ -34,8 +34,8 @@ classifiers = [
]
[project.urls]
-Homepage = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python"
-Repository = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python"
+Homepage = "https://github.com/digitalocean/genai-python"
+Repository = "https://github.com/digitalocean/genai-python"
[tool.rye]
@@ -121,7 +121,7 @@ path = "README.md"
[[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]]
# replace relative links with absolute links
pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)'
-replacement = '[\1](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/\g<2>)'
+replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)'
[tool.pytest.ini_options]
testpaths = ["tests"]
diff --git a/release-please-config.json b/release-please-config.json
new file mode 100644
index 00000000..234b9475
--- /dev/null
+++ b/release-please-config.json
@@ -0,0 +1,66 @@
+{
+ "packages": {
+ ".": {}
+ },
+ "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json",
+ "include-v-in-tag": true,
+ "include-component-in-tag": false,
+ "versioning": "prerelease",
+ "prerelease": true,
+ "bump-minor-pre-major": true,
+ "bump-patch-for-minor-pre-major": false,
+ "pull-request-header": "Automated Release PR",
+ "pull-request-title-pattern": "release: ${version}",
+ "changelog-sections": [
+ {
+ "type": "feat",
+ "section": "Features"
+ },
+ {
+ "type": "fix",
+ "section": "Bug Fixes"
+ },
+ {
+ "type": "perf",
+ "section": "Performance Improvements"
+ },
+ {
+ "type": "revert",
+ "section": "Reverts"
+ },
+ {
+ "type": "chore",
+ "section": "Chores"
+ },
+ {
+ "type": "docs",
+ "section": "Documentation"
+ },
+ {
+ "type": "style",
+ "section": "Styles"
+ },
+ {
+ "type": "refactor",
+ "section": "Refactors"
+ },
+ {
+ "type": "test",
+ "section": "Tests",
+ "hidden": true
+ },
+ {
+ "type": "build",
+ "section": "Build System"
+ },
+ {
+ "type": "ci",
+ "section": "Continuous Integration",
+ "hidden": true
+ }
+ ],
+ "release-type": "python",
+ "extra-files": [
+ "src/digitalocean_genai_sdk/_version.py"
+ ]
+}
\ No newline at end of file
diff --git a/requirements-dev.lock b/requirements-dev.lock
index bf449af3..8a2680e6 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -13,7 +13,7 @@
annotated-types==0.6.0
# via pydantic
anyio==4.4.0
- # via digitalocean-genai-sdk
+ # via do-genai
# via httpx
argcomplete==3.1.2
# via nox
@@ -26,7 +26,7 @@ dirty-equals==0.6.0
distlib==0.3.7
# via virtualenv
distro==1.8.0
- # via digitalocean-genai-sdk
+ # via do-genai
exceptiongroup==1.2.2
# via anyio
# via pytest
@@ -37,7 +37,7 @@ h11==0.14.0
httpcore==1.0.2
# via httpx
httpx==0.28.1
- # via digitalocean-genai-sdk
+ # via do-genai
# via respx
idna==3.4
# via anyio
@@ -64,7 +64,7 @@ platformdirs==3.11.0
pluggy==1.5.0
# via pytest
pydantic==2.10.3
- # via digitalocean-genai-sdk
+ # via do-genai
pydantic-core==2.27.1
# via pydantic
pygments==2.18.0
@@ -86,14 +86,14 @@ six==1.16.0
# via python-dateutil
sniffio==1.3.0
# via anyio
- # via digitalocean-genai-sdk
+ # via do-genai
time-machine==2.9.0
tomli==2.0.2
# via mypy
# via pytest
typing-extensions==4.12.2
# via anyio
- # via digitalocean-genai-sdk
+ # via do-genai
# via mypy
# via pydantic
# via pydantic-core
diff --git a/requirements.lock b/requirements.lock
index e655776d..832a9acd 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -13,13 +13,13 @@
annotated-types==0.6.0
# via pydantic
anyio==4.4.0
- # via digitalocean-genai-sdk
+ # via do-genai
# via httpx
certifi==2023.7.22
# via httpcore
# via httpx
distro==1.8.0
- # via digitalocean-genai-sdk
+ # via do-genai
exceptiongroup==1.2.2
# via anyio
h11==0.14.0
@@ -27,19 +27,19 @@ h11==0.14.0
httpcore==1.0.2
# via httpx
httpx==0.28.1
- # via digitalocean-genai-sdk
+ # via do-genai
idna==3.4
# via anyio
# via httpx
pydantic==2.10.3
- # via digitalocean-genai-sdk
+ # via do-genai
pydantic-core==2.27.1
# via pydantic
sniffio==1.3.0
# via anyio
- # via digitalocean-genai-sdk
+ # via do-genai
typing-extensions==4.12.2
# via anyio
- # via digitalocean-genai-sdk
+ # via do-genai
# via pydantic
# via pydantic-core
diff --git a/src/digitalocean_genai_sdk/_base_client.py b/src/digitalocean_genai_sdk/_base_client.py
index 9f58b2f9..73cd30fc 100644
--- a/src/digitalocean_genai_sdk/_base_client.py
+++ b/src/digitalocean_genai_sdk/_base_client.py
@@ -960,6 +960,9 @@ def request(
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
+ if options.follow_redirects is not None:
+ kwargs["follow_redirects"] = options.follow_redirects
+
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
response = None
@@ -1460,6 +1463,9 @@ async def request(
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
+ if options.follow_redirects is not None:
+ kwargs["follow_redirects"] = options.follow_redirects
+
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
response = None
diff --git a/src/digitalocean_genai_sdk/_client.py b/src/digitalocean_genai_sdk/_client.py
index 99580b5e..8a0fb675 100644
--- a/src/digitalocean_genai_sdk/_client.py
+++ b/src/digitalocean_genai_sdk/_client.py
@@ -21,20 +21,7 @@
)
from ._utils import is_given, get_async_library
from ._version import __version__
-from .resources import (
- audio,
- files,
- images,
- models,
- batches,
- uploads,
- realtime,
- responses,
- assistants,
- embeddings,
- completions,
- moderations,
-)
+from .resources import models, embeddings
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import APIStatusError, DigitaloceanGenaiSDKError
from ._base_client import (
@@ -43,10 +30,6 @@
AsyncAPIClient,
)
from .resources.chat import chat
-from .resources.threads import threads
-from .resources.fine_tuning import fine_tuning
-from .resources.organization import organization
-from .resources.vector_stores import vector_stores
__all__ = [
"Timeout",
@@ -61,23 +44,9 @@
class DigitaloceanGenaiSDK(SyncAPIClient):
- assistants: assistants.AssistantsResource
- audio: audio.AudioResource
- batches: batches.BatchesResource
chat: chat.ChatResource
- completions: completions.CompletionsResource
embeddings: embeddings.EmbeddingsResource
- files: files.FilesResource
- fine_tuning: fine_tuning.FineTuningResource
- images: images.ImagesResource
models: models.ModelsResource
- moderations: moderations.ModerationsResource
- organization: organization.OrganizationResource
- realtime: realtime.RealtimeResource
- responses: responses.ResponsesResource
- threads: threads.ThreadsResource
- uploads: uploads.UploadsResource
- vector_stores: vector_stores.VectorStoresResource
with_raw_response: DigitaloceanGenaiSDKWithRawResponse
with_streaming_response: DigitaloceanGenaiSDKWithStreamedResponse
@@ -135,23 +104,9 @@ def __init__(
_strict_response_validation=_strict_response_validation,
)
- self.assistants = assistants.AssistantsResource(self)
- self.audio = audio.AudioResource(self)
- self.batches = batches.BatchesResource(self)
self.chat = chat.ChatResource(self)
- self.completions = completions.CompletionsResource(self)
self.embeddings = embeddings.EmbeddingsResource(self)
- self.files = files.FilesResource(self)
- self.fine_tuning = fine_tuning.FineTuningResource(self)
- self.images = images.ImagesResource(self)
self.models = models.ModelsResource(self)
- self.moderations = moderations.ModerationsResource(self)
- self.organization = organization.OrganizationResource(self)
- self.realtime = realtime.RealtimeResource(self)
- self.responses = responses.ResponsesResource(self)
- self.threads = threads.ThreadsResource(self)
- self.uploads = uploads.UploadsResource(self)
- self.vector_stores = vector_stores.VectorStoresResource(self)
self.with_raw_response = DigitaloceanGenaiSDKWithRawResponse(self)
self.with_streaming_response = DigitaloceanGenaiSDKWithStreamedResponse(self)
@@ -261,23 +216,9 @@ def _make_status_error(
class AsyncDigitaloceanGenaiSDK(AsyncAPIClient):
- assistants: assistants.AsyncAssistantsResource
- audio: audio.AsyncAudioResource
- batches: batches.AsyncBatchesResource
chat: chat.AsyncChatResource
- completions: completions.AsyncCompletionsResource
embeddings: embeddings.AsyncEmbeddingsResource
- files: files.AsyncFilesResource
- fine_tuning: fine_tuning.AsyncFineTuningResource
- images: images.AsyncImagesResource
models: models.AsyncModelsResource
- moderations: moderations.AsyncModerationsResource
- organization: organization.AsyncOrganizationResource
- realtime: realtime.AsyncRealtimeResource
- responses: responses.AsyncResponsesResource
- threads: threads.AsyncThreadsResource
- uploads: uploads.AsyncUploadsResource
- vector_stores: vector_stores.AsyncVectorStoresResource
with_raw_response: AsyncDigitaloceanGenaiSDKWithRawResponse
with_streaming_response: AsyncDigitaloceanGenaiSDKWithStreamedResponse
@@ -335,23 +276,9 @@ def __init__(
_strict_response_validation=_strict_response_validation,
)
- self.assistants = assistants.AsyncAssistantsResource(self)
- self.audio = audio.AsyncAudioResource(self)
- self.batches = batches.AsyncBatchesResource(self)
self.chat = chat.AsyncChatResource(self)
- self.completions = completions.AsyncCompletionsResource(self)
self.embeddings = embeddings.AsyncEmbeddingsResource(self)
- self.files = files.AsyncFilesResource(self)
- self.fine_tuning = fine_tuning.AsyncFineTuningResource(self)
- self.images = images.AsyncImagesResource(self)
self.models = models.AsyncModelsResource(self)
- self.moderations = moderations.AsyncModerationsResource(self)
- self.organization = organization.AsyncOrganizationResource(self)
- self.realtime = realtime.AsyncRealtimeResource(self)
- self.responses = responses.AsyncResponsesResource(self)
- self.threads = threads.AsyncThreadsResource(self)
- self.uploads = uploads.AsyncUploadsResource(self)
- self.vector_stores = vector_stores.AsyncVectorStoresResource(self)
self.with_raw_response = AsyncDigitaloceanGenaiSDKWithRawResponse(self)
self.with_streaming_response = AsyncDigitaloceanGenaiSDKWithStreamedResponse(self)
@@ -462,86 +389,30 @@ def _make_status_error(
class DigitaloceanGenaiSDKWithRawResponse:
def __init__(self, client: DigitaloceanGenaiSDK) -> None:
- self.assistants = assistants.AssistantsResourceWithRawResponse(client.assistants)
- self.audio = audio.AudioResourceWithRawResponse(client.audio)
- self.batches = batches.BatchesResourceWithRawResponse(client.batches)
self.chat = chat.ChatResourceWithRawResponse(client.chat)
- self.completions = completions.CompletionsResourceWithRawResponse(client.completions)
self.embeddings = embeddings.EmbeddingsResourceWithRawResponse(client.embeddings)
- self.files = files.FilesResourceWithRawResponse(client.files)
- self.fine_tuning = fine_tuning.FineTuningResourceWithRawResponse(client.fine_tuning)
- self.images = images.ImagesResourceWithRawResponse(client.images)
self.models = models.ModelsResourceWithRawResponse(client.models)
- self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations)
- self.organization = organization.OrganizationResourceWithRawResponse(client.organization)
- self.realtime = realtime.RealtimeResourceWithRawResponse(client.realtime)
- self.responses = responses.ResponsesResourceWithRawResponse(client.responses)
- self.threads = threads.ThreadsResourceWithRawResponse(client.threads)
- self.uploads = uploads.UploadsResourceWithRawResponse(client.uploads)
- self.vector_stores = vector_stores.VectorStoresResourceWithRawResponse(client.vector_stores)
class AsyncDigitaloceanGenaiSDKWithRawResponse:
def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None:
- self.assistants = assistants.AsyncAssistantsResourceWithRawResponse(client.assistants)
- self.audio = audio.AsyncAudioResourceWithRawResponse(client.audio)
- self.batches = batches.AsyncBatchesResourceWithRawResponse(client.batches)
self.chat = chat.AsyncChatResourceWithRawResponse(client.chat)
- self.completions = completions.AsyncCompletionsResourceWithRawResponse(client.completions)
self.embeddings = embeddings.AsyncEmbeddingsResourceWithRawResponse(client.embeddings)
- self.files = files.AsyncFilesResourceWithRawResponse(client.files)
- self.fine_tuning = fine_tuning.AsyncFineTuningResourceWithRawResponse(client.fine_tuning)
- self.images = images.AsyncImagesResourceWithRawResponse(client.images)
self.models = models.AsyncModelsResourceWithRawResponse(client.models)
- self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations)
- self.organization = organization.AsyncOrganizationResourceWithRawResponse(client.organization)
- self.realtime = realtime.AsyncRealtimeResourceWithRawResponse(client.realtime)
- self.responses = responses.AsyncResponsesResourceWithRawResponse(client.responses)
- self.threads = threads.AsyncThreadsResourceWithRawResponse(client.threads)
- self.uploads = uploads.AsyncUploadsResourceWithRawResponse(client.uploads)
- self.vector_stores = vector_stores.AsyncVectorStoresResourceWithRawResponse(client.vector_stores)
class DigitaloceanGenaiSDKWithStreamedResponse:
def __init__(self, client: DigitaloceanGenaiSDK) -> None:
- self.assistants = assistants.AssistantsResourceWithStreamingResponse(client.assistants)
- self.audio = audio.AudioResourceWithStreamingResponse(client.audio)
- self.batches = batches.BatchesResourceWithStreamingResponse(client.batches)
self.chat = chat.ChatResourceWithStreamingResponse(client.chat)
- self.completions = completions.CompletionsResourceWithStreamingResponse(client.completions)
self.embeddings = embeddings.EmbeddingsResourceWithStreamingResponse(client.embeddings)
- self.files = files.FilesResourceWithStreamingResponse(client.files)
- self.fine_tuning = fine_tuning.FineTuningResourceWithStreamingResponse(client.fine_tuning)
- self.images = images.ImagesResourceWithStreamingResponse(client.images)
self.models = models.ModelsResourceWithStreamingResponse(client.models)
- self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations)
- self.organization = organization.OrganizationResourceWithStreamingResponse(client.organization)
- self.realtime = realtime.RealtimeResourceWithStreamingResponse(client.realtime)
- self.responses = responses.ResponsesResourceWithStreamingResponse(client.responses)
- self.threads = threads.ThreadsResourceWithStreamingResponse(client.threads)
- self.uploads = uploads.UploadsResourceWithStreamingResponse(client.uploads)
- self.vector_stores = vector_stores.VectorStoresResourceWithStreamingResponse(client.vector_stores)
class AsyncDigitaloceanGenaiSDKWithStreamedResponse:
def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None:
- self.assistants = assistants.AsyncAssistantsResourceWithStreamingResponse(client.assistants)
- self.audio = audio.AsyncAudioResourceWithStreamingResponse(client.audio)
- self.batches = batches.AsyncBatchesResourceWithStreamingResponse(client.batches)
self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat)
- self.completions = completions.AsyncCompletionsResourceWithStreamingResponse(client.completions)
self.embeddings = embeddings.AsyncEmbeddingsResourceWithStreamingResponse(client.embeddings)
- self.files = files.AsyncFilesResourceWithStreamingResponse(client.files)
- self.fine_tuning = fine_tuning.AsyncFineTuningResourceWithStreamingResponse(client.fine_tuning)
- self.images = images.AsyncImagesResourceWithStreamingResponse(client.images)
self.models = models.AsyncModelsResourceWithStreamingResponse(client.models)
- self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations)
- self.organization = organization.AsyncOrganizationResourceWithStreamingResponse(client.organization)
- self.realtime = realtime.AsyncRealtimeResourceWithStreamingResponse(client.realtime)
- self.responses = responses.AsyncResponsesResourceWithStreamingResponse(client.responses)
- self.threads = threads.AsyncThreadsResourceWithStreamingResponse(client.threads)
- self.uploads = uploads.AsyncUploadsResourceWithStreamingResponse(client.uploads)
- self.vector_stores = vector_stores.AsyncVectorStoresResourceWithStreamingResponse(client.vector_stores)
Client = DigitaloceanGenaiSDK
diff --git a/src/digitalocean_genai_sdk/_files.py b/src/digitalocean_genai_sdk/_files.py
index df28b382..715cc207 100644
--- a/src/digitalocean_genai_sdk/_files.py
+++ b/src/digitalocean_genai_sdk/_files.py
@@ -34,7 +34,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
if not is_file_content(obj):
prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`"
raise RuntimeError(
- f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main#file-uploads"
+ f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead."
) from None
diff --git a/src/digitalocean_genai_sdk/_models.py b/src/digitalocean_genai_sdk/_models.py
index 798956f1..4f214980 100644
--- a/src/digitalocean_genai_sdk/_models.py
+++ b/src/digitalocean_genai_sdk/_models.py
@@ -737,6 +737,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
idempotency_key: str
json_data: Body
extra_json: AnyMapping
+ follow_redirects: bool
@final
@@ -750,6 +751,7 @@ class FinalRequestOptions(pydantic.BaseModel):
files: Union[HttpxRequestFiles, None] = None
idempotency_key: Union[str, None] = None
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
+ follow_redirects: Union[bool, None] = None
# It should be noted that we cannot use `json` here as that would override
# a BaseModel method in an incompatible fashion.
diff --git a/src/digitalocean_genai_sdk/_types.py b/src/digitalocean_genai_sdk/_types.py
index b2bfbbec..3c0d156e 100644
--- a/src/digitalocean_genai_sdk/_types.py
+++ b/src/digitalocean_genai_sdk/_types.py
@@ -100,6 +100,7 @@ class RequestOptions(TypedDict, total=False):
params: Query
extra_json: AnyMapping
idempotency_key: str
+ follow_redirects: bool
# Sentinel class used until PEP 0661 is accepted
@@ -215,3 +216,4 @@ class _GenericAlias(Protocol):
class HttpxSendArgs(TypedDict, total=False):
auth: httpx.Auth
+ follow_redirects: bool
diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py
index 5c4fa53a..a788b3f9 100644
--- a/src/digitalocean_genai_sdk/_version.py
+++ b/src/digitalocean_genai_sdk/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "digitalocean_genai_sdk"
-__version__ = "0.0.1-alpha.0"
+__version__ = "0.1.0-alpha.1" # x-release-please-version
diff --git a/src/digitalocean_genai_sdk/resources/__init__.py b/src/digitalocean_genai_sdk/resources/__init__.py
index 237b0ca7..c9177434 100644
--- a/src/digitalocean_genai_sdk/resources/__init__.py
+++ b/src/digitalocean_genai_sdk/resources/__init__.py
@@ -8,30 +8,6 @@
ChatResourceWithStreamingResponse,
AsyncChatResourceWithStreamingResponse,
)
-from .audio import (
- AudioResource,
- AsyncAudioResource,
- AudioResourceWithRawResponse,
- AsyncAudioResourceWithRawResponse,
- AudioResourceWithStreamingResponse,
- AsyncAudioResourceWithStreamingResponse,
-)
-from .files import (
- FilesResource,
- AsyncFilesResource,
- FilesResourceWithRawResponse,
- AsyncFilesResourceWithRawResponse,
- FilesResourceWithStreamingResponse,
- AsyncFilesResourceWithStreamingResponse,
-)
-from .images import (
- ImagesResource,
- AsyncImagesResource,
- ImagesResourceWithRawResponse,
- AsyncImagesResourceWithRawResponse,
- ImagesResourceWithStreamingResponse,
- AsyncImagesResourceWithStreamingResponse,
-)
from .models import (
ModelsResource,
AsyncModelsResource,
@@ -40,54 +16,6 @@
ModelsResourceWithStreamingResponse,
AsyncModelsResourceWithStreamingResponse,
)
-from .batches import (
- BatchesResource,
- AsyncBatchesResource,
- BatchesResourceWithRawResponse,
- AsyncBatchesResourceWithRawResponse,
- BatchesResourceWithStreamingResponse,
- AsyncBatchesResourceWithStreamingResponse,
-)
-from .threads import (
- ThreadsResource,
- AsyncThreadsResource,
- ThreadsResourceWithRawResponse,
- AsyncThreadsResourceWithRawResponse,
- ThreadsResourceWithStreamingResponse,
- AsyncThreadsResourceWithStreamingResponse,
-)
-from .uploads import (
- UploadsResource,
- AsyncUploadsResource,
- UploadsResourceWithRawResponse,
- AsyncUploadsResourceWithRawResponse,
- UploadsResourceWithStreamingResponse,
- AsyncUploadsResourceWithStreamingResponse,
-)
-from .realtime import (
- RealtimeResource,
- AsyncRealtimeResource,
- RealtimeResourceWithRawResponse,
- AsyncRealtimeResourceWithRawResponse,
- RealtimeResourceWithStreamingResponse,
- AsyncRealtimeResourceWithStreamingResponse,
-)
-from .responses import (
- ResponsesResource,
- AsyncResponsesResource,
- ResponsesResourceWithRawResponse,
- AsyncResponsesResourceWithRawResponse,
- ResponsesResourceWithStreamingResponse,
- AsyncResponsesResourceWithStreamingResponse,
-)
-from .assistants import (
- AssistantsResource,
- AsyncAssistantsResource,
- AssistantsResourceWithRawResponse,
- AsyncAssistantsResourceWithRawResponse,
- AssistantsResourceWithStreamingResponse,
- AsyncAssistantsResourceWithStreamingResponse,
-)
from .embeddings import (
EmbeddingsResource,
AsyncEmbeddingsResource,
@@ -96,148 +24,24 @@
EmbeddingsResourceWithStreamingResponse,
AsyncEmbeddingsResourceWithStreamingResponse,
)
-from .completions import (
- CompletionsResource,
- AsyncCompletionsResource,
- CompletionsResourceWithRawResponse,
- AsyncCompletionsResourceWithRawResponse,
- CompletionsResourceWithStreamingResponse,
- AsyncCompletionsResourceWithStreamingResponse,
-)
-from .fine_tuning import (
- FineTuningResource,
- AsyncFineTuningResource,
- FineTuningResourceWithRawResponse,
- AsyncFineTuningResourceWithRawResponse,
- FineTuningResourceWithStreamingResponse,
- AsyncFineTuningResourceWithStreamingResponse,
-)
-from .moderations import (
- ModerationsResource,
- AsyncModerationsResource,
- ModerationsResourceWithRawResponse,
- AsyncModerationsResourceWithRawResponse,
- ModerationsResourceWithStreamingResponse,
- AsyncModerationsResourceWithStreamingResponse,
-)
-from .organization import (
- OrganizationResource,
- AsyncOrganizationResource,
- OrganizationResourceWithRawResponse,
- AsyncOrganizationResourceWithRawResponse,
- OrganizationResourceWithStreamingResponse,
- AsyncOrganizationResourceWithStreamingResponse,
-)
-from .vector_stores import (
- VectorStoresResource,
- AsyncVectorStoresResource,
- VectorStoresResourceWithRawResponse,
- AsyncVectorStoresResourceWithRawResponse,
- VectorStoresResourceWithStreamingResponse,
- AsyncVectorStoresResourceWithStreamingResponse,
-)
__all__ = [
- "AssistantsResource",
- "AsyncAssistantsResource",
- "AssistantsResourceWithRawResponse",
- "AsyncAssistantsResourceWithRawResponse",
- "AssistantsResourceWithStreamingResponse",
- "AsyncAssistantsResourceWithStreamingResponse",
- "AudioResource",
- "AsyncAudioResource",
- "AudioResourceWithRawResponse",
- "AsyncAudioResourceWithRawResponse",
- "AudioResourceWithStreamingResponse",
- "AsyncAudioResourceWithStreamingResponse",
- "BatchesResource",
- "AsyncBatchesResource",
- "BatchesResourceWithRawResponse",
- "AsyncBatchesResourceWithRawResponse",
- "BatchesResourceWithStreamingResponse",
- "AsyncBatchesResourceWithStreamingResponse",
"ChatResource",
"AsyncChatResource",
"ChatResourceWithRawResponse",
"AsyncChatResourceWithRawResponse",
"ChatResourceWithStreamingResponse",
"AsyncChatResourceWithStreamingResponse",
- "CompletionsResource",
- "AsyncCompletionsResource",
- "CompletionsResourceWithRawResponse",
- "AsyncCompletionsResourceWithRawResponse",
- "CompletionsResourceWithStreamingResponse",
- "AsyncCompletionsResourceWithStreamingResponse",
"EmbeddingsResource",
"AsyncEmbeddingsResource",
"EmbeddingsResourceWithRawResponse",
"AsyncEmbeddingsResourceWithRawResponse",
"EmbeddingsResourceWithStreamingResponse",
"AsyncEmbeddingsResourceWithStreamingResponse",
- "FilesResource",
- "AsyncFilesResource",
- "FilesResourceWithRawResponse",
- "AsyncFilesResourceWithRawResponse",
- "FilesResourceWithStreamingResponse",
- "AsyncFilesResourceWithStreamingResponse",
- "FineTuningResource",
- "AsyncFineTuningResource",
- "FineTuningResourceWithRawResponse",
- "AsyncFineTuningResourceWithRawResponse",
- "FineTuningResourceWithStreamingResponse",
- "AsyncFineTuningResourceWithStreamingResponse",
- "ImagesResource",
- "AsyncImagesResource",
- "ImagesResourceWithRawResponse",
- "AsyncImagesResourceWithRawResponse",
- "ImagesResourceWithStreamingResponse",
- "AsyncImagesResourceWithStreamingResponse",
"ModelsResource",
"AsyncModelsResource",
"ModelsResourceWithRawResponse",
"AsyncModelsResourceWithRawResponse",
"ModelsResourceWithStreamingResponse",
"AsyncModelsResourceWithStreamingResponse",
- "ModerationsResource",
- "AsyncModerationsResource",
- "ModerationsResourceWithRawResponse",
- "AsyncModerationsResourceWithRawResponse",
- "ModerationsResourceWithStreamingResponse",
- "AsyncModerationsResourceWithStreamingResponse",
- "OrganizationResource",
- "AsyncOrganizationResource",
- "OrganizationResourceWithRawResponse",
- "AsyncOrganizationResourceWithRawResponse",
- "OrganizationResourceWithStreamingResponse",
- "AsyncOrganizationResourceWithStreamingResponse",
- "RealtimeResource",
- "AsyncRealtimeResource",
- "RealtimeResourceWithRawResponse",
- "AsyncRealtimeResourceWithRawResponse",
- "RealtimeResourceWithStreamingResponse",
- "AsyncRealtimeResourceWithStreamingResponse",
- "ResponsesResource",
- "AsyncResponsesResource",
- "ResponsesResourceWithRawResponse",
- "AsyncResponsesResourceWithRawResponse",
- "ResponsesResourceWithStreamingResponse",
- "AsyncResponsesResourceWithStreamingResponse",
- "ThreadsResource",
- "AsyncThreadsResource",
- "ThreadsResourceWithRawResponse",
- "AsyncThreadsResourceWithRawResponse",
- "ThreadsResourceWithStreamingResponse",
- "AsyncThreadsResourceWithStreamingResponse",
- "UploadsResource",
- "AsyncUploadsResource",
- "UploadsResourceWithRawResponse",
- "AsyncUploadsResourceWithRawResponse",
- "UploadsResourceWithStreamingResponse",
- "AsyncUploadsResourceWithStreamingResponse",
- "VectorStoresResource",
- "AsyncVectorStoresResource",
- "VectorStoresResourceWithRawResponse",
- "AsyncVectorStoresResourceWithRawResponse",
- "VectorStoresResourceWithStreamingResponse",
- "AsyncVectorStoresResourceWithStreamingResponse",
]
diff --git a/src/digitalocean_genai_sdk/resources/assistants.py b/src/digitalocean_genai_sdk/resources/assistants.py
deleted file mode 100644
index c6ae36f5..00000000
--- a/src/digitalocean_genai_sdk/resources/assistants.py
+++ /dev/null
@@ -1,910 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import (
- ReasoningEffort,
- assistant_list_params,
- assistant_create_params,
- assistant_update_params,
-)
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.assistant_object import AssistantObject
-from ..types.reasoning_effort import ReasoningEffort
-from ..types.assistant_list_response import AssistantListResponse
-from ..types.assistant_delete_response import AssistantDeleteResponse
-from ..types.assistant_supported_models import AssistantSupportedModels
-from ..types.assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = ["AssistantsResource", "AsyncAssistantsResource"]
-
-
-class AssistantsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AssistantsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AssistantsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AssistantsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AssistantsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- model: Union[str, AssistantSupportedModels],
- description: Optional[str] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """
- Create an assistant with a model and instructions.
-
- Args:
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- description: The description of the assistant. The maximum length is 512 characters.
-
- instructions: The system instructions that the assistant uses. The maximum length is 256,000
- characters.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the assistant. The maximum length is 256 characters.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `file_search`, or
- `function`.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/assistants",
- body=maybe_transform(
- {
- "model": model,
- "description": description,
- "instructions": instructions,
- "metadata": metadata,
- "name": name,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "temperature": temperature,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- },
- assistant_create_params.AssistantCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- def retrieve(
- self,
- assistant_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """
- Retrieves an assistant.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return self._get(
- f"/assistants/{assistant_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- def update(
- self,
- assistant_id: str,
- *,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[str, AssistantSupportedModels] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """Modifies an assistant.
-
- Args:
- description: The description of the assistant.
-
- The maximum length is 512 characters.
-
- instructions: The system instructions that the assistant uses. The maximum length is 256,000
- characters.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- name: The name of the assistant. The maximum length is 256 characters.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `file_search`, or
- `function`.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return self._post(
- f"/assistants/{assistant_id}",
- body=maybe_transform(
- {
- "description": description,
- "instructions": instructions,
- "metadata": metadata,
- "model": model,
- "name": name,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "temperature": temperature,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- },
- assistant_update_params.AssistantUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantListResponse:
- """Returns a list of assistants.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/assistants",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- assistant_list_params.AssistantListParams,
- ),
- ),
- cast_to=AssistantListResponse,
- )
-
- def delete(
- self,
- assistant_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantDeleteResponse:
- """
- Delete an assistant.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return self._delete(
- f"/assistants/{assistant_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantDeleteResponse,
- )
-
-
-class AsyncAssistantsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAssistantsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAssistantsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAssistantsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncAssistantsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- model: Union[str, AssistantSupportedModels],
- description: Optional[str] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """
- Create an assistant with a model and instructions.
-
- Args:
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- description: The description of the assistant. The maximum length is 512 characters.
-
- instructions: The system instructions that the assistant uses. The maximum length is 256,000
- characters.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the assistant. The maximum length is 256 characters.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `file_search`, or
- `function`.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/assistants",
- body=await async_maybe_transform(
- {
- "model": model,
- "description": description,
- "instructions": instructions,
- "metadata": metadata,
- "name": name,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "temperature": temperature,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- },
- assistant_create_params.AssistantCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- async def retrieve(
- self,
- assistant_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """
- Retrieves an assistant.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return await self._get(
- f"/assistants/{assistant_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- async def update(
- self,
- assistant_id: str,
- *,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[str, AssistantSupportedModels] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """Modifies an assistant.
-
- Args:
- description: The description of the assistant.
-
- The maximum length is 512 characters.
-
- instructions: The system instructions that the assistant uses. The maximum length is 256,000
- characters.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- name: The name of the assistant. The maximum length is 256 characters.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `file_search`, or
- `function`.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return await self._post(
- f"/assistants/{assistant_id}",
- body=await async_maybe_transform(
- {
- "description": description,
- "instructions": instructions,
- "metadata": metadata,
- "model": model,
- "name": name,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "temperature": temperature,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- },
- assistant_update_params.AssistantUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantListResponse:
- """Returns a list of assistants.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/assistants",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- assistant_list_params.AssistantListParams,
- ),
- ),
- cast_to=AssistantListResponse,
- )
-
- async def delete(
- self,
- assistant_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantDeleteResponse:
- """
- Delete an assistant.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return await self._delete(
- f"/assistants/{assistant_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantDeleteResponse,
- )
-
-
-class AssistantsResourceWithRawResponse:
- def __init__(self, assistants: AssistantsResource) -> None:
- self._assistants = assistants
-
- self.create = to_raw_response_wrapper(
- assistants.create,
- )
- self.retrieve = to_raw_response_wrapper(
- assistants.retrieve,
- )
- self.update = to_raw_response_wrapper(
- assistants.update,
- )
- self.list = to_raw_response_wrapper(
- assistants.list,
- )
- self.delete = to_raw_response_wrapper(
- assistants.delete,
- )
-
-
-class AsyncAssistantsResourceWithRawResponse:
- def __init__(self, assistants: AsyncAssistantsResource) -> None:
- self._assistants = assistants
-
- self.create = async_to_raw_response_wrapper(
- assistants.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- assistants.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- assistants.update,
- )
- self.list = async_to_raw_response_wrapper(
- assistants.list,
- )
- self.delete = async_to_raw_response_wrapper(
- assistants.delete,
- )
-
-
-class AssistantsResourceWithStreamingResponse:
- def __init__(self, assistants: AssistantsResource) -> None:
- self._assistants = assistants
-
- self.create = to_streamed_response_wrapper(
- assistants.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- assistants.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- assistants.update,
- )
- self.list = to_streamed_response_wrapper(
- assistants.list,
- )
- self.delete = to_streamed_response_wrapper(
- assistants.delete,
- )
-
-
-class AsyncAssistantsResourceWithStreamingResponse:
- def __init__(self, assistants: AsyncAssistantsResource) -> None:
- self._assistants = assistants
-
- self.create = async_to_streamed_response_wrapper(
- assistants.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- assistants.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- assistants.update,
- )
- self.list = async_to_streamed_response_wrapper(
- assistants.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- assistants.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/audio.py b/src/digitalocean_genai_sdk/resources/audio.py
deleted file mode 100644
index 7cecbe6d..00000000
--- a/src/digitalocean_genai_sdk/resources/audio.py
+++ /dev/null
@@ -1,650 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Any, List, Union, Mapping, Optional, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import (
- audio_generate_speech_params,
- audio_translate_audio_params,
- audio_transcribe_audio_params,
-)
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- BinaryAPIResponse,
- AsyncBinaryAPIResponse,
- StreamedBinaryAPIResponse,
- AsyncStreamedBinaryAPIResponse,
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- to_custom_raw_response_wrapper,
- async_to_streamed_response_wrapper,
- to_custom_streamed_response_wrapper,
- async_to_custom_raw_response_wrapper,
- async_to_custom_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.voice_ids_shared_param import VoiceIDsSharedParam
-from ..types.audio_translate_audio_response import AudioTranslateAudioResponse
-from ..types.audio_transcribe_audio_response import AudioTranscribeAudioResponse
-
-__all__ = ["AudioResource", "AsyncAudioResource"]
-
-
-class AudioResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AudioResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AudioResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AudioResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AudioResourceWithStreamingResponse(self)
-
- def generate_speech(
- self,
- *,
- input: str,
- model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]],
- voice: VoiceIDsSharedParam,
- instructions: str | NotGiven = NOT_GIVEN,
- response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
- speed: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BinaryAPIResponse:
- """
- Generates audio from the input text.
-
- Args:
- input: The text to generate audio for. The maximum length is 4096 characters.
-
- model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or
- `gpt-4o-mini-tts`.
-
- voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
- `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
- `verse`. Previews of the voices are available in the
- [Text to speech guide](/docs/guides/text-to-speech#voice-options).
-
- instructions: Control the voice of your generated audio with additional instructions. Does not
- work with `tts-1` or `tts-1-hd`.
-
- response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
- `wav`, and `pcm`.
-
- speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
- the default.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
- return self._post(
- "/audio/speech",
- body=maybe_transform(
- {
- "input": input,
- "model": model,
- "voice": voice,
- "instructions": instructions,
- "response_format": response_format,
- "speed": speed,
- },
- audio_generate_speech_params.AudioGenerateSpeechParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=BinaryAPIResponse,
- )
-
- def transcribe_audio(
- self,
- *,
- file: FileTypes,
- model: Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]],
- include: List[Literal["logprobs"]] | NotGiven = NOT_GIVEN,
- language: str | NotGiven = NOT_GIVEN,
- prompt: str | NotGiven = NOT_GIVEN,
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AudioTranscribeAudioResponse:
- """
- Transcribes audio into the input language.
-
- Args:
- file:
- The audio file object (not file name) to transcribe, in one of these formats:
- flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-
- model: ID of the model to use. The options are `gpt-4o-transcribe`,
- `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
- Whisper V2 model).
-
- include: Additional information to include in the transcription response. `logprobs` will
- return the log probabilities of the tokens in the response to understand the
- model's confidence in the transcription. `logprobs` only works with
- response_format set to `json` and only with the models `gpt-4o-transcribe` and
- `gpt-4o-mini-transcribe`.
-
- language: The language of the input audio. Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
-
- prompt: An optional text to guide the model's style or continue a previous audio
- segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the
- audio language.
-
- response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
- the only supported format is `json`.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the
- [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
- for more information.
-
- Note: Streaming is not supported for the `whisper-1` model and will be ignored.
-
- temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
- output more random, while lower values like 0.2 will make it more focused and
- deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
-
- timestamp_granularities: The timestamp granularities to populate for this transcription.
- `response_format` must be set `verbose_json` to use timestamp granularities.
- Either or both of these options are supported: `word`, or `segment`. Note: There
- is no additional latency for segment timestamps, but generating word timestamps
- incurs additional latency.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "model": model,
- "include": include,
- "language": language,
- "prompt": prompt,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "timestamp_granularities": timestamp_granularities,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return cast(
- AudioTranscribeAudioResponse,
- self._post(
- "/audio/transcriptions",
- body=maybe_transform(body, audio_transcribe_audio_params.AudioTranscribeAudioParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=cast(
- Any, AudioTranscribeAudioResponse
- ), # Union types cannot be passed in as arguments in the type system
- ),
- )
-
- def translate_audio(
- self,
- *,
- file: FileTypes,
- model: Union[str, Literal["whisper-1"]],
- prompt: str | NotGiven = NOT_GIVEN,
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AudioTranslateAudioResponse:
- """
- Translates audio into English.
-
- Args:
- file: The audio file object (not file name) translate, in one of these formats: flac,
- mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-
- model: ID of the model to use. Only `whisper-1` (which is powered by our open source
- Whisper V2 model) is currently available.
-
- prompt: An optional text to guide the model's style or continue a previous audio
- segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in
- English.
-
- response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`.
-
- temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
- output more random, while lower values like 0.2 will make it more focused and
- deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "model": model,
- "prompt": prompt,
- "response_format": response_format,
- "temperature": temperature,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return cast(
- AudioTranslateAudioResponse,
- self._post(
- "/audio/translations",
- body=maybe_transform(body, audio_translate_audio_params.AudioTranslateAudioParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=cast(
- Any, AudioTranslateAudioResponse
- ), # Union types cannot be passed in as arguments in the type system
- ),
- )
-
-
-class AsyncAudioResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAudioResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAudioResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAudioResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncAudioResourceWithStreamingResponse(self)
-
- async def generate_speech(
- self,
- *,
- input: str,
- model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]],
- voice: VoiceIDsSharedParam,
- instructions: str | NotGiven = NOT_GIVEN,
- response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
- speed: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncBinaryAPIResponse:
- """
- Generates audio from the input text.
-
- Args:
- input: The text to generate audio for. The maximum length is 4096 characters.
-
- model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or
- `gpt-4o-mini-tts`.
-
- voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
- `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
- `verse`. Previews of the voices are available in the
- [Text to speech guide](/docs/guides/text-to-speech#voice-options).
-
- instructions: Control the voice of your generated audio with additional instructions. Does not
- work with `tts-1` or `tts-1-hd`.
-
- response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
- `wav`, and `pcm`.
-
- speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
- the default.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
- return await self._post(
- "/audio/speech",
- body=await async_maybe_transform(
- {
- "input": input,
- "model": model,
- "voice": voice,
- "instructions": instructions,
- "response_format": response_format,
- "speed": speed,
- },
- audio_generate_speech_params.AudioGenerateSpeechParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AsyncBinaryAPIResponse,
- )
-
- async def transcribe_audio(
- self,
- *,
- file: FileTypes,
- model: Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]],
- include: List[Literal["logprobs"]] | NotGiven = NOT_GIVEN,
- language: str | NotGiven = NOT_GIVEN,
- prompt: str | NotGiven = NOT_GIVEN,
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AudioTranscribeAudioResponse:
- """
- Transcribes audio into the input language.
-
- Args:
- file:
- The audio file object (not file name) to transcribe, in one of these formats:
- flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-
- model: ID of the model to use. The options are `gpt-4o-transcribe`,
- `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
- Whisper V2 model).
-
- include: Additional information to include in the transcription response. `logprobs` will
- return the log probabilities of the tokens in the response to understand the
- model's confidence in the transcription. `logprobs` only works with
- response_format set to `json` and only with the models `gpt-4o-transcribe` and
- `gpt-4o-mini-transcribe`.
-
- language: The language of the input audio. Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
-
- prompt: An optional text to guide the model's style or continue a previous audio
- segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the
- audio language.
-
- response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
- the only supported format is `json`.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the
- [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
- for more information.
-
- Note: Streaming is not supported for the `whisper-1` model and will be ignored.
-
- temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
- output more random, while lower values like 0.2 will make it more focused and
- deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
-
- timestamp_granularities: The timestamp granularities to populate for this transcription.
- `response_format` must be set `verbose_json` to use timestamp granularities.
- Either or both of these options are supported: `word`, or `segment`. Note: There
- is no additional latency for segment timestamps, but generating word timestamps
- incurs additional latency.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "model": model,
- "include": include,
- "language": language,
- "prompt": prompt,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "timestamp_granularities": timestamp_granularities,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return cast(
- AudioTranscribeAudioResponse,
- await self._post(
- "/audio/transcriptions",
- body=await async_maybe_transform(body, audio_transcribe_audio_params.AudioTranscribeAudioParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=cast(
- Any, AudioTranscribeAudioResponse
- ), # Union types cannot be passed in as arguments in the type system
- ),
- )
-
- async def translate_audio(
- self,
- *,
- file: FileTypes,
- model: Union[str, Literal["whisper-1"]],
- prompt: str | NotGiven = NOT_GIVEN,
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AudioTranslateAudioResponse:
- """
- Translates audio into English.
-
- Args:
- file: The audio file object (not file name) translate, in one of these formats: flac,
- mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-
- model: ID of the model to use. Only `whisper-1` (which is powered by our open source
- Whisper V2 model) is currently available.
-
- prompt: An optional text to guide the model's style or continue a previous audio
- segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in
- English.
-
- response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`.
-
- temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
- output more random, while lower values like 0.2 will make it more focused and
- deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "model": model,
- "prompt": prompt,
- "response_format": response_format,
- "temperature": temperature,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return cast(
- AudioTranslateAudioResponse,
- await self._post(
- "/audio/translations",
- body=await async_maybe_transform(body, audio_translate_audio_params.AudioTranslateAudioParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=cast(
- Any, AudioTranslateAudioResponse
- ), # Union types cannot be passed in as arguments in the type system
- ),
- )
-
-
-class AudioResourceWithRawResponse:
- def __init__(self, audio: AudioResource) -> None:
- self._audio = audio
-
- self.generate_speech = to_custom_raw_response_wrapper(
- audio.generate_speech,
- BinaryAPIResponse,
- )
- self.transcribe_audio = to_raw_response_wrapper(
- audio.transcribe_audio,
- )
- self.translate_audio = to_raw_response_wrapper(
- audio.translate_audio,
- )
-
-
-class AsyncAudioResourceWithRawResponse:
- def __init__(self, audio: AsyncAudioResource) -> None:
- self._audio = audio
-
- self.generate_speech = async_to_custom_raw_response_wrapper(
- audio.generate_speech,
- AsyncBinaryAPIResponse,
- )
- self.transcribe_audio = async_to_raw_response_wrapper(
- audio.transcribe_audio,
- )
- self.translate_audio = async_to_raw_response_wrapper(
- audio.translate_audio,
- )
-
-
-class AudioResourceWithStreamingResponse:
- def __init__(self, audio: AudioResource) -> None:
- self._audio = audio
-
- self.generate_speech = to_custom_streamed_response_wrapper(
- audio.generate_speech,
- StreamedBinaryAPIResponse,
- )
- self.transcribe_audio = to_streamed_response_wrapper(
- audio.transcribe_audio,
- )
- self.translate_audio = to_streamed_response_wrapper(
- audio.translate_audio,
- )
-
-
-class AsyncAudioResourceWithStreamingResponse:
- def __init__(self, audio: AsyncAudioResource) -> None:
- self._audio = audio
-
- self.generate_speech = async_to_custom_streamed_response_wrapper(
- audio.generate_speech,
- AsyncStreamedBinaryAPIResponse,
- )
- self.transcribe_audio = async_to_streamed_response_wrapper(
- audio.transcribe_audio,
- )
- self.translate_audio = async_to_streamed_response_wrapper(
- audio.translate_audio,
- )
diff --git a/src/digitalocean_genai_sdk/resources/batches.py b/src/digitalocean_genai_sdk/resources/batches.py
deleted file mode 100644
index a2b1fedf..00000000
--- a/src/digitalocean_genai_sdk/resources/batches.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import batch_list_params, batch_create_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..types.batch import Batch
-from .._base_client import make_request_options
-from ..types.batch_list_response import BatchListResponse
-
-__all__ = ["BatchesResource", "AsyncBatchesResource"]
-
-
-class BatchesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> BatchesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return BatchesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> BatchesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return BatchesResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- completion_window: Literal["24h"],
- endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
- input_file_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """
- Creates and executes a batch from an uploaded file of requests
-
- Args:
- completion_window: The time frame within which the batch should be processed. Currently only `24h`
- is supported.
-
- endpoint: The endpoint to be used for all requests in the batch. Currently
- `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
- are supported. Note that `/v1/embeddings` batches are also restricted to a
- maximum of 50,000 embedding inputs across all requests in the batch.
-
- input_file_id: The ID of an uploaded file that contains requests for the new batch.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your input file must be formatted as a
- [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with
- the purpose `batch`. The file can contain up to 50,000 requests, and can be up
- to 200 MB in size.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/batches",
- body=maybe_transform(
- {
- "completion_window": completion_window,
- "endpoint": endpoint,
- "input_file_id": input_file_id,
- "metadata": metadata,
- },
- batch_create_params.BatchCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
- def retrieve(
- self,
- batch_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """
- Retrieves a batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._get(
- f"/batches/{batch_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BatchListResponse:
- """List your organization's batches.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/batches",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- batch_list_params.BatchListParams,
- ),
- ),
- cast_to=BatchListResponse,
- )
-
- def cancel(
- self,
- batch_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """Cancels an in-progress batch.
-
- The batch will be in status `cancelling` for up to
- 10 minutes, before changing to `cancelled`, where it will have partial results
- (if any) available in the output file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._post(
- f"/batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
-
-class AsyncBatchesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncBatchesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncBatchesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncBatchesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncBatchesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- completion_window: Literal["24h"],
- endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
- input_file_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """
- Creates and executes a batch from an uploaded file of requests
-
- Args:
- completion_window: The time frame within which the batch should be processed. Currently only `24h`
- is supported.
-
- endpoint: The endpoint to be used for all requests in the batch. Currently
- `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
- are supported. Note that `/v1/embeddings` batches are also restricted to a
- maximum of 50,000 embedding inputs across all requests in the batch.
-
- input_file_id: The ID of an uploaded file that contains requests for the new batch.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your input file must be formatted as a
- [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with
- the purpose `batch`. The file can contain up to 50,000 requests, and can be up
- to 200 MB in size.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/batches",
- body=await async_maybe_transform(
- {
- "completion_window": completion_window,
- "endpoint": endpoint,
- "input_file_id": input_file_id,
- "metadata": metadata,
- },
- batch_create_params.BatchCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
- async def retrieve(
- self,
- batch_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """
- Retrieves a batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._get(
- f"/batches/{batch_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BatchListResponse:
- """List your organization's batches.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/batches",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- batch_list_params.BatchListParams,
- ),
- ),
- cast_to=BatchListResponse,
- )
-
- async def cancel(
- self,
- batch_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """Cancels an in-progress batch.
-
- The batch will be in status `cancelling` for up to
- 10 minutes, before changing to `cancelled`, where it will have partial results
- (if any) available in the output file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._post(
- f"/batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
-
-class BatchesResourceWithRawResponse:
- def __init__(self, batches: BatchesResource) -> None:
- self._batches = batches
-
- self.create = to_raw_response_wrapper(
- batches.create,
- )
- self.retrieve = to_raw_response_wrapper(
- batches.retrieve,
- )
- self.list = to_raw_response_wrapper(
- batches.list,
- )
- self.cancel = to_raw_response_wrapper(
- batches.cancel,
- )
-
-
-class AsyncBatchesResourceWithRawResponse:
- def __init__(self, batches: AsyncBatchesResource) -> None:
- self._batches = batches
-
- self.create = async_to_raw_response_wrapper(
- batches.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- batches.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- batches.list,
- )
- self.cancel = async_to_raw_response_wrapper(
- batches.cancel,
- )
-
-
-class BatchesResourceWithStreamingResponse:
- def __init__(self, batches: BatchesResource) -> None:
- self._batches = batches
-
- self.create = to_streamed_response_wrapper(
- batches.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- batches.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- batches.list,
- )
- self.cancel = to_streamed_response_wrapper(
- batches.cancel,
- )
-
-
-class AsyncBatchesResourceWithStreamingResponse:
- def __init__(self, batches: AsyncBatchesResource) -> None:
- self._batches = batches
-
- self.create = async_to_streamed_response_wrapper(
- batches.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- batches.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- batches.list,
- )
- self.cancel = async_to_streamed_response_wrapper(
- batches.cancel,
- )
diff --git a/src/digitalocean_genai_sdk/resources/chat/chat.py b/src/digitalocean_genai_sdk/resources/chat/chat.py
index df1f356c..ac19d849 100644
--- a/src/digitalocean_genai_sdk/resources/chat/chat.py
+++ b/src/digitalocean_genai_sdk/resources/chat/chat.py
@@ -27,7 +27,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
"""
return ChatResourceWithRawResponse(self)
@@ -36,7 +36,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+ For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
"""
return ChatResourceWithStreamingResponse(self)
@@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
"""
return AsyncChatResourceWithRawResponse(self)
@@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+ For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
"""
return AsyncChatResourceWithStreamingResponse(self)
diff --git a/src/digitalocean_genai_sdk/resources/chat/completions.py b/src/digitalocean_genai_sdk/resources/chat/completions.py
index c0908a57..effaf077 100644
--- a/src/digitalocean_genai_sdk/resources/chat/completions.py
+++ b/src/digitalocean_genai_sdk/resources/chat/completions.py
@@ -2,12 +2,10 @@
from __future__ import annotations
-from typing import Dict, List, Iterable, Optional
-from typing_extensions import Literal
+from typing import Dict, Iterable, Optional
import httpx
-from ...types import ReasoningEffort
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
@@ -18,21 +16,11 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ...types.chat import (
- completion_list_params,
- completion_create_params,
- completion_update_params,
- completion_list_messages_params,
-)
+from ...types.chat import completion_create_params
from ..._base_client import make_request_options
-from ...types.reasoning_effort import ReasoningEffort
from ...types.chat.create_response import CreateResponse
from ...types.stop_configuration_param import StopConfigurationParam
-from ...types.chat.model_ids_shared_param import ModelIDsSharedParam
-from ...types.chat.completion_list_response import CompletionListResponse
-from ...types.chat.completion_delete_response import CompletionDeleteResponse
from ...types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
-from ...types.chat.completion_list_messages_response import CompletionListMessagesResponse
__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
@@ -44,7 +32,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
"""
return CompletionsResourceWithRawResponse(self)
@@ -53,7 +41,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+ For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
"""
return CompletionsResourceWithStreamingResponse(self)
@@ -61,36 +49,22 @@ def create(
self,
*,
messages: Iterable[completion_create_params.Message],
- model: ModelIDsSharedParam,
- audio: Optional[completion_create_params.Audio] | NotGiven = NOT_GIVEN,
+ model: str,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
- functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- prediction: Optional[completion_create_params.Prediction] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
- store: Optional[bool] | NotGiven = NOT_GIVEN,
stream: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
- web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -99,60 +73,17 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CreateResponse:
"""
- **Starting a new project?** We recommend trying
- [Responses](/docs/api-reference/responses) to take advantage of the latest
- OpenAI platform features. Compare
- [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses).
-
- ---
-
- Creates a model response for the given chat conversation. Learn more in the
- [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
- and [audio](/docs/guides/audio) guides.
-
- Parameter support can differ depending on the model used to generate the
- response, particularly for newer reasoning models. Parameters that are only
- supported for reasoning models are noted below. For the current state of
- unsupported parameters in reasoning models,
- [refer to the reasoning guide](/docs/guides/reasoning).
+ Creates a model response for the given chat conversation.
Args:
- messages: A list of messages comprising the conversation so far. Depending on the
- [model](/docs/models) you use, different message types (modalities) are
- supported, like [text](/docs/guides/text-generation),
- [images](/docs/guides/vision), and [audio](/docs/guides/audio).
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
+ messages: A list of messages comprising the conversation so far.
- audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
+ model: Model ID used to generate the response.
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- function_call: Deprecated in favor of `tool_choice`.
-
- Controls which (if any) function is called by the model.
-
- `none` means the model will not call a function and instead generates a message.
-
- `auto` means the model can pick between generating a message or calling a
- function.
-
- Specifying a particular function via `{"name": "my_function"}` forces the model
- to call that function.
-
- `none` is the default when no functions are present. `auto` is the default if
- functions are present.
-
- functions: Deprecated in favor of `tools`.
-
- A list of functions the model may generate JSON inputs for.
-
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the
@@ -166,15 +97,14 @@ def create(
returns the log probabilities of each output token returned in the `content` of
`message`.
- max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
- max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
- completion. This value can be used to control
- [costs](https://openai.com/api/pricing/) for text generated via API.
+ max_tokens: The maximum number of tokens that can be generated in the completion.
- This value is now deprecated in favor of `max_completion_tokens`, and is not
- compatible with [o1 series models](/docs/guides/reasoning).
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
@@ -183,84 +113,19 @@ def create(
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
- modalities: Output types that you would like the model to generate. Most models are capable
- of generating text, which is the default:
-
- `["text"]`
-
- The `gpt-4o-audio-preview` model can also be used to
- [generate audio](/docs/guides/audio). To request that this model generate both
- text and audio responses, you can use:
-
- `["text", "audio"]`
-
n: How many chat completion choices to generate for each input message. Note that
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- prediction: Static predicted output content, such as the content of a text file that is
- being regenerated.
-
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: An object specifying the format that the model must output.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
-
- seed: This feature is in Beta. If specified, our system will make a best effort to
- sample deterministically, such that repeated requests with the same `seed` and
- parameters should return the same result. Determinism is not guaranteed, and you
- should refer to the `system_fingerprint` response parameter to monitor changes
- in the backend.
-
- service_tier: Specifies the latency tier to use for processing the request. This parameter is
- relevant for customers subscribed to the scale tier service:
-
- - If set to 'auto', and the Project is Scale tier enabled, the system will
- utilize scale tier credits until they are exhausted.
- - If set to 'auto', and the Project is not Scale tier enabled, the request will
- be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
- - If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
- - When not set, the default behavior is 'auto'.
-
- When this parameter is set, the response body will include the `service_tier`
- utilized.
-
stop: Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
- store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](/docs/guides/distillation) or
- [evals](/docs/guides/evals) products.
-
stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/chat/streaming) for more
- information, along with the
- [streaming responses](/docs/guides/streaming-responses) guide for more
- information on how to handle the streaming events.
+ generated using server-sent events.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -269,20 +134,6 @@ def create(
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tool and instead generates a message. `auto` means the model can
- pick between generating a message or calling one or more tools. `required` means
- the model must call one or more tools. Specifying a particular tool via
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- `none` is the default when no tools are present. `auto` is the default if tools
- are present.
-
- tools: A list of tools the model may call. Currently, only functions are supported as a
- tool. Use this to provide a list of functions the model may generate JSON inputs
- for. A max of 128 functions are supported.
-
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
@@ -293,11 +144,8 @@ def create(
We generally recommend altering this or `temperature` but not both.
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
extra_headers: Send extra headers
@@ -313,35 +161,21 @@ def create(
{
"messages": messages,
"model": model,
- "audio": audio,
"frequency_penalty": frequency_penalty,
- "function_call": function_call,
- "functions": functions,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
- "modalities": modalities,
"n": n,
- "parallel_tool_calls": parallel_tool_calls,
- "prediction": prediction,
"presence_penalty": presence_penalty,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "seed": seed,
- "service_tier": service_tier,
"stop": stop,
- "store": store,
"stream": stream,
"stream_options": stream_options,
"temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"user": user,
- "web_search_options": web_search_options,
},
completion_create_params.CompletionCreateParams,
),
@@ -351,241 +185,6 @@ def create(
cast_to=CreateResponse,
)
- def retrieve(
- self,
- completion_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """Get a stored chat completion.
-
- Only Chat Completions that have been created with
- the `store` parameter set to `true` will be returned.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return self._get(
- f"/chat/completions/{completion_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- def update(
- self,
- completion_id: str,
- *,
- metadata: Optional[Dict[str, str]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """Modify a stored chat completion.
-
- Only Chat Completions that have been created
- with the `store` parameter set to `true` can be modified. Currently, the only
- supported modification is to update the `metadata` field.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return self._post(
- f"/chat/completions/{completion_id}",
- body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: str | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionListResponse:
- """List stored Chat Completions.
-
- Only Chat Completions that have been stored with
- the `store` parameter set to `true` will be returned.
-
- Args:
- after: Identifier for the last chat completion from the previous pagination request.
-
- limit: Number of Chat Completions to retrieve.
-
- metadata:
- A list of metadata keys to filter the Chat Completions by. Example:
-
- `metadata[key1]=value1&metadata[key2]=value2`
-
- model: The model used to generate the Chat Completions.
-
- order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
- `desc` for descending order. Defaults to `asc`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/chat/completions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "metadata": metadata,
- "model": model,
- "order": order,
- },
- completion_list_params.CompletionListParams,
- ),
- ),
- cast_to=CompletionListResponse,
- )
-
- def delete(
- self,
- completion_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionDeleteResponse:
- """Delete a stored chat completion.
-
- Only Chat Completions that have been created
- with the `store` parameter set to `true` can be deleted.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return self._delete(
- f"/chat/completions/{completion_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CompletionDeleteResponse,
- )
-
- def list_messages(
- self,
- completion_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionListMessagesResponse:
- """Get the messages in a stored chat completion.
-
- Only Chat Completions that have
- been created with the `store` parameter set to `true` will be returned.
-
- Args:
- after: Identifier for the last message from the previous pagination request.
-
- limit: Number of messages to retrieve.
-
- order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
- for descending order. Defaults to `asc`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return self._get(
- f"/chat/completions/{completion_id}/messages",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- },
- completion_list_messages_params.CompletionListMessagesParams,
- ),
- ),
- cast_to=CompletionListMessagesResponse,
- )
-
class AsyncCompletionsResource(AsyncAPIResource):
@cached_property
@@ -594,7 +193,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
"""
return AsyncCompletionsResourceWithRawResponse(self)
@@ -603,7 +202,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+ For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
"""
return AsyncCompletionsResourceWithStreamingResponse(self)
@@ -611,36 +210,22 @@ async def create(
self,
*,
messages: Iterable[completion_create_params.Message],
- model: ModelIDsSharedParam,
- audio: Optional[completion_create_params.Audio] | NotGiven = NOT_GIVEN,
+ model: str,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
- functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- prediction: Optional[completion_create_params.Prediction] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
- store: Optional[bool] | NotGiven = NOT_GIVEN,
stream: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
- web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -649,60 +234,17 @@ async def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CreateResponse:
"""
- **Starting a new project?** We recommend trying
- [Responses](/docs/api-reference/responses) to take advantage of the latest
- OpenAI platform features. Compare
- [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses).
-
- ---
-
- Creates a model response for the given chat conversation. Learn more in the
- [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
- and [audio](/docs/guides/audio) guides.
-
- Parameter support can differ depending on the model used to generate the
- response, particularly for newer reasoning models. Parameters that are only
- supported for reasoning models are noted below. For the current state of
- unsupported parameters in reasoning models,
- [refer to the reasoning guide](/docs/guides/reasoning).
+ Creates a model response for the given chat conversation.
Args:
- messages: A list of messages comprising the conversation so far. Depending on the
- [model](/docs/models) you use, different message types (modalities) are
- supported, like [text](/docs/guides/text-generation),
- [images](/docs/guides/vision), and [audio](/docs/guides/audio).
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
+ messages: A list of messages comprising the conversation so far.
- audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
+ model: Model ID used to generate the response.
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- function_call: Deprecated in favor of `tool_choice`.
-
- Controls which (if any) function is called by the model.
-
- `none` means the model will not call a function and instead generates a message.
-
- `auto` means the model can pick between generating a message or calling a
- function.
-
- Specifying a particular function via `{"name": "my_function"}` forces the model
- to call that function.
-
- `none` is the default when no functions are present. `auto` is the default if
- functions are present.
-
- functions: Deprecated in favor of `tools`.
-
- A list of functions the model may generate JSON inputs for.
-
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the
@@ -716,15 +258,14 @@ async def create(
returns the log probabilities of each output token returned in the `content` of
`message`.
- max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
- max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
- completion. This value can be used to control
- [costs](https://openai.com/api/pricing/) for text generated via API.
+ max_tokens: The maximum number of tokens that can be generated in the completion.
- This value is now deprecated in favor of `max_completion_tokens`, and is not
- compatible with [o1 series models](/docs/guides/reasoning).
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
@@ -733,84 +274,19 @@ async def create(
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
- modalities: Output types that you would like the model to generate. Most models are capable
- of generating text, which is the default:
-
- `["text"]`
-
- The `gpt-4o-audio-preview` model can also be used to
- [generate audio](/docs/guides/audio). To request that this model generate both
- text and audio responses, you can use:
-
- `["text", "audio"]`
-
n: How many chat completion choices to generate for each input message. Note that
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- prediction: Static predicted output content, such as the content of a text file that is
- being regenerated.
-
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: An object specifying the format that the model must output.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
-
- seed: This feature is in Beta. If specified, our system will make a best effort to
- sample deterministically, such that repeated requests with the same `seed` and
- parameters should return the same result. Determinism is not guaranteed, and you
- should refer to the `system_fingerprint` response parameter to monitor changes
- in the backend.
-
- service_tier: Specifies the latency tier to use for processing the request. This parameter is
- relevant for customers subscribed to the scale tier service:
-
- - If set to 'auto', and the Project is Scale tier enabled, the system will
- utilize scale tier credits until they are exhausted.
- - If set to 'auto', and the Project is not Scale tier enabled, the request will
- be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
- - If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
- - When not set, the default behavior is 'auto'.
-
- When this parameter is set, the response body will include the `service_tier`
- utilized.
-
stop: Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
- store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](/docs/guides/distillation) or
- [evals](/docs/guides/evals) products.
-
stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/chat/streaming) for more
- information, along with the
- [streaming responses](/docs/guides/streaming-responses) guide for more
- information on how to handle the streaming events.
+ generated using server-sent events.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -819,20 +295,6 @@ async def create(
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tool and instead generates a message. `auto` means the model can
- pick between generating a message or calling one or more tools. `required` means
- the model must call one or more tools. Specifying a particular tool via
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- `none` is the default when no tools are present. `auto` is the default if tools
- are present.
-
- tools: A list of tools the model may call. Currently, only functions are supported as a
- tool. Use this to provide a list of functions the model may generate JSON inputs
- for. A max of 128 functions are supported.
-
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
@@ -843,11 +305,8 @@ async def create(
We generally recommend altering this or `temperature` but not both.
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
extra_headers: Send extra headers
@@ -863,35 +322,21 @@ async def create(
{
"messages": messages,
"model": model,
- "audio": audio,
"frequency_penalty": frequency_penalty,
- "function_call": function_call,
- "functions": functions,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
- "modalities": modalities,
"n": n,
- "parallel_tool_calls": parallel_tool_calls,
- "prediction": prediction,
"presence_penalty": presence_penalty,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "seed": seed,
- "service_tier": service_tier,
"stop": stop,
- "store": store,
"stream": stream,
"stream_options": stream_options,
"temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"user": user,
- "web_search_options": web_search_options,
},
completion_create_params.CompletionCreateParams,
),
@@ -901,241 +346,6 @@ async def create(
cast_to=CreateResponse,
)
- async def retrieve(
- self,
- completion_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """Get a stored chat completion.
-
- Only Chat Completions that have been created with
- the `store` parameter set to `true` will be returned.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return await self._get(
- f"/chat/completions/{completion_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- async def update(
- self,
- completion_id: str,
- *,
- metadata: Optional[Dict[str, str]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """Modify a stored chat completion.
-
- Only Chat Completions that have been created
- with the `store` parameter set to `true` can be modified. Currently, the only
- supported modification is to update the `metadata` field.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return await self._post(
- f"/chat/completions/{completion_id}",
- body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: str | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionListResponse:
- """List stored Chat Completions.
-
- Only Chat Completions that have been stored with
- the `store` parameter set to `true` will be returned.
-
- Args:
- after: Identifier for the last chat completion from the previous pagination request.
-
- limit: Number of Chat Completions to retrieve.
-
- metadata:
- A list of metadata keys to filter the Chat Completions by. Example:
-
- `metadata[key1]=value1&metadata[key2]=value2`
-
- model: The model used to generate the Chat Completions.
-
- order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
- `desc` for descending order. Defaults to `asc`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/chat/completions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "metadata": metadata,
- "model": model,
- "order": order,
- },
- completion_list_params.CompletionListParams,
- ),
- ),
- cast_to=CompletionListResponse,
- )
-
- async def delete(
- self,
- completion_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionDeleteResponse:
- """Delete a stored chat completion.
-
- Only Chat Completions that have been created
- with the `store` parameter set to `true` can be deleted.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return await self._delete(
- f"/chat/completions/{completion_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CompletionDeleteResponse,
- )
-
- async def list_messages(
- self,
- completion_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionListMessagesResponse:
- """Get the messages in a stored chat completion.
-
- Only Chat Completions that have
- been created with the `store` parameter set to `true` will be returned.
-
- Args:
- after: Identifier for the last message from the previous pagination request.
-
- limit: Number of messages to retrieve.
-
- order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
- for descending order. Defaults to `asc`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return await self._get(
- f"/chat/completions/{completion_id}/messages",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- },
- completion_list_messages_params.CompletionListMessagesParams,
- ),
- ),
- cast_to=CompletionListMessagesResponse,
- )
-
class CompletionsResourceWithRawResponse:
def __init__(self, completions: CompletionsResource) -> None:
@@ -1144,21 +354,6 @@ def __init__(self, completions: CompletionsResource) -> None:
self.create = to_raw_response_wrapper(
completions.create,
)
- self.retrieve = to_raw_response_wrapper(
- completions.retrieve,
- )
- self.update = to_raw_response_wrapper(
- completions.update,
- )
- self.list = to_raw_response_wrapper(
- completions.list,
- )
- self.delete = to_raw_response_wrapper(
- completions.delete,
- )
- self.list_messages = to_raw_response_wrapper(
- completions.list_messages,
- )
class AsyncCompletionsResourceWithRawResponse:
@@ -1168,21 +363,6 @@ def __init__(self, completions: AsyncCompletionsResource) -> None:
self.create = async_to_raw_response_wrapper(
completions.create,
)
- self.retrieve = async_to_raw_response_wrapper(
- completions.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- completions.update,
- )
- self.list = async_to_raw_response_wrapper(
- completions.list,
- )
- self.delete = async_to_raw_response_wrapper(
- completions.delete,
- )
- self.list_messages = async_to_raw_response_wrapper(
- completions.list_messages,
- )
class CompletionsResourceWithStreamingResponse:
@@ -1192,21 +372,6 @@ def __init__(self, completions: CompletionsResource) -> None:
self.create = to_streamed_response_wrapper(
completions.create,
)
- self.retrieve = to_streamed_response_wrapper(
- completions.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- completions.update,
- )
- self.list = to_streamed_response_wrapper(
- completions.list,
- )
- self.delete = to_streamed_response_wrapper(
- completions.delete,
- )
- self.list_messages = to_streamed_response_wrapper(
- completions.list_messages,
- )
class AsyncCompletionsResourceWithStreamingResponse:
@@ -1216,18 +381,3 @@ def __init__(self, completions: AsyncCompletionsResource) -> None:
self.create = async_to_streamed_response_wrapper(
completions.create,
)
- self.retrieve = async_to_streamed_response_wrapper(
- completions.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- completions.update,
- )
- self.list = async_to_streamed_response_wrapper(
- completions.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- completions.delete,
- )
- self.list_messages = async_to_streamed_response_wrapper(
- completions.list_messages,
- )
diff --git a/src/digitalocean_genai_sdk/resources/completions.py b/src/digitalocean_genai_sdk/resources/completions.py
deleted file mode 100644
index ff495166..00000000
--- a/src/digitalocean_genai_sdk/resources/completions.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import completion_create_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.stop_configuration_param import StopConfigurationParam
-from ..types.completion_create_response import CompletionCreateResponse
-from ..types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
-
-__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
-
-
-class CompletionsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> CompletionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return CompletionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return CompletionsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
- prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
- best_of: Optional[int] | NotGiven = NOT_GIVEN,
- echo: Optional[bool] | NotGiven = NOT_GIVEN,
- frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
- logprobs: Optional[int] | NotGiven = NOT_GIVEN,
- max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionCreateResponse:
- """
- Creates a completion for the provided prompt and parameters.
-
- Args:
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- prompt: The prompt(s) to generate completions for, encoded as a string, array of
- strings, array of tokens, or array of token arrays.
-
- Note that <|endoftext|> is the document separator that the model sees during
- training, so if a prompt is not specified the model will generate as if from the
- beginning of a new document.
-
- best_of: Generates `best_of` completions server-side and returns the "best" (the one with
- the highest log probability per token). Results cannot be streamed.
-
- When used with `n`, `best_of` controls the number of candidate completions and
- `n` specifies how many to return – `best_of` must be greater than `n`.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
-
- echo: Echo back the prompt in addition to the completion
-
- frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- existing frequency in the text so far, decreasing the model's likelihood to
- repeat the same line verbatim.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
-
- logit_bias: Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the GPT
- tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
- Mathematically, the bias is added to the logits generated by the model prior to
- sampling. The exact effect will vary per model, but values between -1 and 1
- should decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
- from being generated.
-
- logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
- well the chosen tokens. For example, if `logprobs` is 5, the API will return a
- list of the 5 most likely tokens. The API will always return the `logprob` of
- the sampled token, so there may be up to `logprobs+1` elements in the response.
-
- The maximum value for `logprobs` is 5.
-
- max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
- completion.
-
- The token count of your prompt plus `max_tokens` cannot exceed the model's
- context length.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens.
-
- n: How many completions to generate for each prompt.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
-
- presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
- whether they appear in the text so far, increasing the model's likelihood to
- talk about new topics.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
-
- seed: If specified, our system will make a best effort to sample deterministically,
- such that repeated requests with the same `seed` and parameters should return
- the same result.
-
- Determinism is not guaranteed, and you should refer to the `system_fingerprint`
- response parameter to monitor changes in the backend.
-
- stop: Up to 4 sequences where the API will stop generating further tokens. The
- returned text will not contain the stop sequence.
-
- stream: Whether to stream back partial progress. If set, tokens will be sent as
- data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available, with the stream terminated by a `data: [DONE]`
- message.
- [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
-
- stream_options: Options for streaming response. Only set this when you set `stream: true`.
-
- suffix: The suffix that comes after a completion of inserted text.
-
- This parameter is only supported for `gpt-3.5-turbo-instruct`.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/completions",
- body=maybe_transform(
- {
- "model": model,
- "prompt": prompt,
- "best_of": best_of,
- "echo": echo,
- "frequency_penalty": frequency_penalty,
- "logit_bias": logit_bias,
- "logprobs": logprobs,
- "max_tokens": max_tokens,
- "n": n,
- "presence_penalty": presence_penalty,
- "seed": seed,
- "stop": stop,
- "stream": stream,
- "stream_options": stream_options,
- "suffix": suffix,
- "temperature": temperature,
- "top_p": top_p,
- "user": user,
- },
- completion_create_params.CompletionCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CompletionCreateResponse,
- )
-
-
-class AsyncCompletionsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncCompletionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncCompletionsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
- prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
- best_of: Optional[int] | NotGiven = NOT_GIVEN,
- echo: Optional[bool] | NotGiven = NOT_GIVEN,
- frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
- logprobs: Optional[int] | NotGiven = NOT_GIVEN,
- max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionCreateResponse:
- """
- Creates a completion for the provided prompt and parameters.
-
- Args:
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- prompt: The prompt(s) to generate completions for, encoded as a string, array of
- strings, array of tokens, or array of token arrays.
-
- Note that <|endoftext|> is the document separator that the model sees during
- training, so if a prompt is not specified the model will generate as if from the
- beginning of a new document.
-
- best_of: Generates `best_of` completions server-side and returns the "best" (the one with
- the highest log probability per token). Results cannot be streamed.
-
- When used with `n`, `best_of` controls the number of candidate completions and
- `n` specifies how many to return – `best_of` must be greater than `n`.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
-
- echo: Echo back the prompt in addition to the completion
-
- frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- existing frequency in the text so far, decreasing the model's likelihood to
- repeat the same line verbatim.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
-
- logit_bias: Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the GPT
- tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
- Mathematically, the bias is added to the logits generated by the model prior to
- sampling. The exact effect will vary per model, but values between -1 and 1
- should decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
- from being generated.
-
- logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
- well the chosen tokens. For example, if `logprobs` is 5, the API will return a
- list of the 5 most likely tokens. The API will always return the `logprob` of
- the sampled token, so there may be up to `logprobs+1` elements in the response.
-
- The maximum value for `logprobs` is 5.
-
- max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
- completion.
-
- The token count of your prompt plus `max_tokens` cannot exceed the model's
- context length.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens.
-
- n: How many completions to generate for each prompt.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
-
- presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
- whether they appear in the text so far, increasing the model's likelihood to
- talk about new topics.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
-
- seed: If specified, our system will make a best effort to sample deterministically,
- such that repeated requests with the same `seed` and parameters should return
- the same result.
-
- Determinism is not guaranteed, and you should refer to the `system_fingerprint`
- response parameter to monitor changes in the backend.
-
- stop: Up to 4 sequences where the API will stop generating further tokens. The
- returned text will not contain the stop sequence.
-
- stream: Whether to stream back partial progress. If set, tokens will be sent as
- data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available, with the stream terminated by a `data: [DONE]`
- message.
- [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
-
- stream_options: Options for streaming response. Only set this when you set `stream: true`.
-
- suffix: The suffix that comes after a completion of inserted text.
-
- This parameter is only supported for `gpt-3.5-turbo-instruct`.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/completions",
- body=await async_maybe_transform(
- {
- "model": model,
- "prompt": prompt,
- "best_of": best_of,
- "echo": echo,
- "frequency_penalty": frequency_penalty,
- "logit_bias": logit_bias,
- "logprobs": logprobs,
- "max_tokens": max_tokens,
- "n": n,
- "presence_penalty": presence_penalty,
- "seed": seed,
- "stop": stop,
- "stream": stream,
- "stream_options": stream_options,
- "suffix": suffix,
- "temperature": temperature,
- "top_p": top_p,
- "user": user,
- },
- completion_create_params.CompletionCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CompletionCreateResponse,
- )
-
-
-class CompletionsResourceWithRawResponse:
- def __init__(self, completions: CompletionsResource) -> None:
- self._completions = completions
-
- self.create = to_raw_response_wrapper(
- completions.create,
- )
-
-
-class AsyncCompletionsResourceWithRawResponse:
- def __init__(self, completions: AsyncCompletionsResource) -> None:
- self._completions = completions
-
- self.create = async_to_raw_response_wrapper(
- completions.create,
- )
-
-
-class CompletionsResourceWithStreamingResponse:
- def __init__(self, completions: CompletionsResource) -> None:
- self._completions = completions
-
- self.create = to_streamed_response_wrapper(
- completions.create,
- )
-
-
-class AsyncCompletionsResourceWithStreamingResponse:
- def __init__(self, completions: AsyncCompletionsResource) -> None:
- self._completions = completions
-
- self.create = async_to_streamed_response_wrapper(
- completions.create,
- )
diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/digitalocean_genai_sdk/resources/embeddings.py
index 92552f62..1bcd3145 100644
--- a/src/digitalocean_genai_sdk/resources/embeddings.py
+++ b/src/digitalocean_genai_sdk/resources/embeddings.py
@@ -2,8 +2,7 @@
from __future__ import annotations
-from typing import List, Union, Iterable
-from typing_extensions import Literal
+from typing import List, Union
import httpx
@@ -31,7 +30,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
"""
return EmbeddingsResourceWithRawResponse(self)
@@ -40,17 +39,15 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+ For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
"""
return EmbeddingsResourceWithStreamingResponse(self)
def create(
self,
*,
- input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]],
- model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]],
- dimensions: int | NotGiven = NOT_GIVEN,
- encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
+ input: Union[str, List[str]],
+ model: str,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -64,26 +61,13 @@ def create(
Args:
input: Input text to embed, encoded as a string or array of tokens. To embed multiple
- inputs in a single request, pass an array of strings or array of token arrays.
- The input must not exceed the max input tokens for the model (8192 tokens for
- `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
- dimensions or less.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens. Some models may also impose a limit on total number of
- tokens summed across inputs.
+ inputs in a single request, pass an array of strings.
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
+ model: ID of the model to use. You can use the List models API to see all of your
+ available models.
- dimensions: The number of dimensions the resulting output embeddings should have. Only
- supported in `text-embedding-3` and later models.
-
- encoding_format: The format to return the embeddings in. Can be either `float` or
- [`base64`](https://pypi.org/project/pybase64/).
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
extra_headers: Send extra headers
@@ -99,8 +83,6 @@ def create(
{
"input": input,
"model": model,
- "dimensions": dimensions,
- "encoding_format": encoding_format,
"user": user,
},
embedding_create_params.EmbeddingCreateParams,
@@ -119,7 +101,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
"""
return AsyncEmbeddingsResourceWithRawResponse(self)
@@ -128,17 +110,15 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+ For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
"""
return AsyncEmbeddingsResourceWithStreamingResponse(self)
async def create(
self,
*,
- input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]],
- model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]],
- dimensions: int | NotGiven = NOT_GIVEN,
- encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
+ input: Union[str, List[str]],
+ model: str,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -152,26 +132,13 @@ async def create(
Args:
input: Input text to embed, encoded as a string or array of tokens. To embed multiple
- inputs in a single request, pass an array of strings or array of token arrays.
- The input must not exceed the max input tokens for the model (8192 tokens for
- `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
- dimensions or less.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens. Some models may also impose a limit on total number of
- tokens summed across inputs.
-
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- dimensions: The number of dimensions the resulting output embeddings should have. Only
- supported in `text-embedding-3` and later models.
+ inputs in a single request, pass an array of strings.
- encoding_format: The format to return the embeddings in. Can be either `float` or
- [`base64`](https://pypi.org/project/pybase64/).
+ model: ID of the model to use. You can use the List models API to see all of your
+ available models.
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
extra_headers: Send extra headers
@@ -187,8 +154,6 @@ async def create(
{
"input": input,
"model": model,
- "dimensions": dimensions,
- "encoding_format": encoding_format,
"user": user,
},
embedding_create_params.EmbeddingCreateParams,
diff --git a/src/digitalocean_genai_sdk/resources/files.py b/src/digitalocean_genai_sdk/resources/files.py
deleted file mode 100644
index 65e459f4..00000000
--- a/src/digitalocean_genai_sdk/resources/files.py
+++ /dev/null
@@ -1,608 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Mapping, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import file_list_params, file_upload_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.openai_file import OpenAIFile
-from ..types.file_list_response import FileListResponse
-from ..types.file_delete_response import FileDeleteResponse
-
-__all__ = ["FilesResource", "AsyncFilesResource"]
-
-
-class FilesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> FilesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return FilesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FilesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return FilesResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIFile:
- """
- Returns information about a specific file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._get(
- f"/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIFile,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- purpose: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileListResponse:
- """Returns a list of files.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 10,000, and the default is 10,000.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- purpose: Only return files with the given purpose.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- "purpose": purpose,
- },
- file_list_params.FileListParams,
- ),
- ),
- cast_to=FileListResponse,
- )
-
- def delete(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """
- Delete a file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._delete(
- f"/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileDeleteResponse,
- )
-
- def retrieve_content(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> str:
- """
- Returns the contents of the specified file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._get(
- f"/files/{file_id}/content",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=str,
- )
-
- def upload(
- self,
- *,
- file: FileTypes,
- purpose: Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIFile:
- """Upload a file that can be used across various endpoints.
-
- Individual files can be
- up to 512 MB, and the size of all files uploaded by one organization can be up
- to 100 GB.
-
- The Assistants API supports files up to 2 million tokens and of specific file
- types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
-
- The Fine-tuning API only supports `.jsonl` files. The input also has certain
- required formats for fine-tuning
- [chat](/docs/api-reference/fine-tuning/chat-input) or
- [completions](/docs/api-reference/fine-tuning/completions-input) models.
-
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
- has a specific required [format](/docs/api-reference/batch/request-input).
-
- Please [contact us](https://help.openai.com/) if you need to increase these
- storage limits.
-
- Args:
- file: The File object (not file name) to be uploaded.
-
- purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
- Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
- fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
- Flexible file type for any purpose - `evals`: Used for eval data sets
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "purpose": purpose,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/files",
- body=maybe_transform(body, file_upload_params.FileUploadParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIFile,
- )
-
-
-class AsyncFilesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncFilesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncFilesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncFilesResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIFile:
- """
- Returns information about a specific file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._get(
- f"/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIFile,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- purpose: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileListResponse:
- """Returns a list of files.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 10,000, and the default is 10,000.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- purpose: Only return files with the given purpose.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- "purpose": purpose,
- },
- file_list_params.FileListParams,
- ),
- ),
- cast_to=FileListResponse,
- )
-
- async def delete(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """
- Delete a file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._delete(
- f"/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileDeleteResponse,
- )
-
- async def retrieve_content(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> str:
- """
- Returns the contents of the specified file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._get(
- f"/files/{file_id}/content",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=str,
- )
-
- async def upload(
- self,
- *,
- file: FileTypes,
- purpose: Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIFile:
- """Upload a file that can be used across various endpoints.
-
- Individual files can be
- up to 512 MB, and the size of all files uploaded by one organization can be up
- to 100 GB.
-
- The Assistants API supports files up to 2 million tokens and of specific file
- types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
-
- The Fine-tuning API only supports `.jsonl` files. The input also has certain
- required formats for fine-tuning
- [chat](/docs/api-reference/fine-tuning/chat-input) or
- [completions](/docs/api-reference/fine-tuning/completions-input) models.
-
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
- has a specific required [format](/docs/api-reference/batch/request-input).
-
- Please [contact us](https://help.openai.com/) if you need to increase these
- storage limits.
-
- Args:
- file: The File object (not file name) to be uploaded.
-
- purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
- Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
- fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
- Flexible file type for any purpose - `evals`: Used for eval data sets
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "purpose": purpose,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/files",
- body=await async_maybe_transform(body, file_upload_params.FileUploadParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIFile,
- )
-
-
-class FilesResourceWithRawResponse:
- def __init__(self, files: FilesResource) -> None:
- self._files = files
-
- self.retrieve = to_raw_response_wrapper(
- files.retrieve,
- )
- self.list = to_raw_response_wrapper(
- files.list,
- )
- self.delete = to_raw_response_wrapper(
- files.delete,
- )
- self.retrieve_content = to_raw_response_wrapper(
- files.retrieve_content,
- )
- self.upload = to_raw_response_wrapper(
- files.upload,
- )
-
-
-class AsyncFilesResourceWithRawResponse:
- def __init__(self, files: AsyncFilesResource) -> None:
- self._files = files
-
- self.retrieve = async_to_raw_response_wrapper(
- files.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- files.list,
- )
- self.delete = async_to_raw_response_wrapper(
- files.delete,
- )
- self.retrieve_content = async_to_raw_response_wrapper(
- files.retrieve_content,
- )
- self.upload = async_to_raw_response_wrapper(
- files.upload,
- )
-
-
-class FilesResourceWithStreamingResponse:
- def __init__(self, files: FilesResource) -> None:
- self._files = files
-
- self.retrieve = to_streamed_response_wrapper(
- files.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- files.list,
- )
- self.delete = to_streamed_response_wrapper(
- files.delete,
- )
- self.retrieve_content = to_streamed_response_wrapper(
- files.retrieve_content,
- )
- self.upload = to_streamed_response_wrapper(
- files.upload,
- )
-
-
-class AsyncFilesResourceWithStreamingResponse:
- def __init__(self, files: AsyncFilesResource) -> None:
- self._files = files
-
- self.retrieve = async_to_streamed_response_wrapper(
- files.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- files.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- files.delete,
- )
- self.retrieve_content = async_to_streamed_response_wrapper(
- files.retrieve_content,
- )
- self.upload = async_to_streamed_response_wrapper(
- files.upload,
- )
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py
deleted file mode 100644
index 5f198d2e..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .jobs import (
- JobsResource,
- AsyncJobsResource,
- JobsResourceWithRawResponse,
- AsyncJobsResourceWithRawResponse,
- JobsResourceWithStreamingResponse,
- AsyncJobsResourceWithStreamingResponse,
-)
-from .checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-from .fine_tuning import (
- FineTuningResource,
- AsyncFineTuningResource,
- FineTuningResourceWithRawResponse,
- AsyncFineTuningResourceWithRawResponse,
- FineTuningResourceWithStreamingResponse,
- AsyncFineTuningResourceWithStreamingResponse,
-)
-
-__all__ = [
- "CheckpointsResource",
- "AsyncCheckpointsResource",
- "CheckpointsResourceWithRawResponse",
- "AsyncCheckpointsResourceWithRawResponse",
- "CheckpointsResourceWithStreamingResponse",
- "AsyncCheckpointsResourceWithStreamingResponse",
- "JobsResource",
- "AsyncJobsResource",
- "JobsResourceWithRawResponse",
- "AsyncJobsResourceWithRawResponse",
- "JobsResourceWithStreamingResponse",
- "AsyncJobsResourceWithStreamingResponse",
- "FineTuningResource",
- "AsyncFineTuningResource",
- "FineTuningResourceWithRawResponse",
- "AsyncFineTuningResourceWithRawResponse",
- "FineTuningResourceWithStreamingResponse",
- "AsyncFineTuningResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py
deleted file mode 100644
index 3f6710f0..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-from .permissions import (
- PermissionsResource,
- AsyncPermissionsResource,
- PermissionsResourceWithRawResponse,
- AsyncPermissionsResourceWithRawResponse,
- PermissionsResourceWithStreamingResponse,
- AsyncPermissionsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "PermissionsResource",
- "AsyncPermissionsResource",
- "PermissionsResourceWithRawResponse",
- "AsyncPermissionsResourceWithRawResponse",
- "PermissionsResourceWithStreamingResponse",
- "AsyncPermissionsResourceWithStreamingResponse",
- "CheckpointsResource",
- "AsyncCheckpointsResource",
- "CheckpointsResourceWithRawResponse",
- "AsyncCheckpointsResourceWithRawResponse",
- "CheckpointsResourceWithStreamingResponse",
- "AsyncCheckpointsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py
deleted file mode 100644
index b1a85058..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ...._compat import cached_property
-from .permissions import (
- PermissionsResource,
- AsyncPermissionsResource,
- PermissionsResourceWithRawResponse,
- AsyncPermissionsResourceWithRawResponse,
- PermissionsResourceWithStreamingResponse,
- AsyncPermissionsResourceWithStreamingResponse,
-)
-from ...._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["CheckpointsResource", "AsyncCheckpointsResource"]
-
-
-class CheckpointsResource(SyncAPIResource):
- @cached_property
- def permissions(self) -> PermissionsResource:
- return PermissionsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> CheckpointsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return CheckpointsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return CheckpointsResourceWithStreamingResponse(self)
-
-
-class AsyncCheckpointsResource(AsyncAPIResource):
- @cached_property
- def permissions(self) -> AsyncPermissionsResource:
- return AsyncPermissionsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncCheckpointsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncCheckpointsResourceWithStreamingResponse(self)
-
-
-class CheckpointsResourceWithRawResponse:
- def __init__(self, checkpoints: CheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- @cached_property
- def permissions(self) -> PermissionsResourceWithRawResponse:
- return PermissionsResourceWithRawResponse(self._checkpoints.permissions)
-
-
-class AsyncCheckpointsResourceWithRawResponse:
- def __init__(self, checkpoints: AsyncCheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- @cached_property
- def permissions(self) -> AsyncPermissionsResourceWithRawResponse:
- return AsyncPermissionsResourceWithRawResponse(self._checkpoints.permissions)
-
-
-class CheckpointsResourceWithStreamingResponse:
- def __init__(self, checkpoints: CheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- @cached_property
- def permissions(self) -> PermissionsResourceWithStreamingResponse:
- return PermissionsResourceWithStreamingResponse(self._checkpoints.permissions)
-
-
-class AsyncCheckpointsResourceWithStreamingResponse:
- def __init__(self, checkpoints: AsyncCheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- @cached_property
- def permissions(self) -> AsyncPermissionsResourceWithStreamingResponse:
- return AsyncPermissionsResourceWithStreamingResponse(self._checkpoints.permissions)
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py
deleted file mode 100644
index 0dee4435..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py
+++ /dev/null
@@ -1,401 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params
-from ....types.fine_tuning.checkpoints.permission_delete_response import PermissionDeleteResponse
-from ....types.fine_tuning.checkpoints.list_fine_tuning_checkpoint_permission import ListFineTuningCheckpointPermission
-
-__all__ = ["PermissionsResource", "AsyncPermissionsResource"]
-
-
-class PermissionsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> PermissionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return PermissionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> PermissionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return PermissionsResourceWithStreamingResponse(self)
-
- def create(
- self,
- permission_id: str,
- *,
- project_ids: List[str],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListFineTuningCheckpointPermission:
- """
- **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
-
- This enables organization owners to share fine-tuned models with other projects
- in their organization.
-
- Args:
- project_ids: The project identifiers to grant access to.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return self._post(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ListFineTuningCheckpointPermission,
- )
-
- def retrieve(
- self,
- permission_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN,
- project_id: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListFineTuningCheckpointPermission:
- """
- **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
-
- Organization owners can use this endpoint to view all permissions for a
- fine-tuned model checkpoint.
-
- Args:
- after: Identifier for the last permission ID from the previous pagination request.
-
- limit: Number of permissions to retrieve.
-
- order: The order in which to retrieve permissions.
-
- project_id: The ID of the project to get permissions for.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return self._get(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- "project_id": project_id,
- },
- permission_retrieve_params.PermissionRetrieveParams,
- ),
- ),
- cast_to=ListFineTuningCheckpointPermission,
- )
-
- def delete(
- self,
- permission_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> PermissionDeleteResponse:
- """
- **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
-
- Organization owners can use this endpoint to delete a permission for a
- fine-tuned model checkpoint.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return self._delete(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=PermissionDeleteResponse,
- )
-
-
-class AsyncPermissionsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncPermissionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncPermissionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncPermissionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncPermissionsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- permission_id: str,
- *,
- project_ids: List[str],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListFineTuningCheckpointPermission:
- """
- **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
-
- This enables organization owners to share fine-tuned models with other projects
- in their organization.
-
- Args:
- project_ids: The project identifiers to grant access to.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return await self._post(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- body=await async_maybe_transform(
- {"project_ids": project_ids}, permission_create_params.PermissionCreateParams
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ListFineTuningCheckpointPermission,
- )
-
- async def retrieve(
- self,
- permission_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN,
- project_id: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListFineTuningCheckpointPermission:
- """
- **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
-
- Organization owners can use this endpoint to view all permissions for a
- fine-tuned model checkpoint.
-
- Args:
- after: Identifier for the last permission ID from the previous pagination request.
-
- limit: Number of permissions to retrieve.
-
- order: The order in which to retrieve permissions.
-
- project_id: The ID of the project to get permissions for.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return await self._get(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- "project_id": project_id,
- },
- permission_retrieve_params.PermissionRetrieveParams,
- ),
- ),
- cast_to=ListFineTuningCheckpointPermission,
- )
-
- async def delete(
- self,
- permission_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> PermissionDeleteResponse:
- """
- **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
-
- Organization owners can use this endpoint to delete a permission for a
- fine-tuned model checkpoint.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return await self._delete(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=PermissionDeleteResponse,
- )
-
-
-class PermissionsResourceWithRawResponse:
- def __init__(self, permissions: PermissionsResource) -> None:
- self._permissions = permissions
-
- self.create = to_raw_response_wrapper(
- permissions.create,
- )
- self.retrieve = to_raw_response_wrapper(
- permissions.retrieve,
- )
- self.delete = to_raw_response_wrapper(
- permissions.delete,
- )
-
-
-class AsyncPermissionsResourceWithRawResponse:
- def __init__(self, permissions: AsyncPermissionsResource) -> None:
- self._permissions = permissions
-
- self.create = async_to_raw_response_wrapper(
- permissions.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- permissions.retrieve,
- )
- self.delete = async_to_raw_response_wrapper(
- permissions.delete,
- )
-
-
-class PermissionsResourceWithStreamingResponse:
- def __init__(self, permissions: PermissionsResource) -> None:
- self._permissions = permissions
-
- self.create = to_streamed_response_wrapper(
- permissions.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- permissions.retrieve,
- )
- self.delete = to_streamed_response_wrapper(
- permissions.delete,
- )
-
-
-class AsyncPermissionsResourceWithStreamingResponse:
- def __init__(self, permissions: AsyncPermissionsResource) -> None:
- self._permissions = permissions
-
- self.create = async_to_streamed_response_wrapper(
- permissions.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- permissions.retrieve,
- )
- self.delete = async_to_streamed_response_wrapper(
- permissions.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py b/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py
deleted file mode 100644
index 8b4956b1..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ..._compat import cached_property
-from .jobs.jobs import (
- JobsResource,
- AsyncJobsResource,
- JobsResourceWithRawResponse,
- AsyncJobsResourceWithRawResponse,
- JobsResourceWithStreamingResponse,
- AsyncJobsResourceWithStreamingResponse,
-)
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from .checkpoints.checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-
-__all__ = ["FineTuningResource", "AsyncFineTuningResource"]
-
-
-class FineTuningResource(SyncAPIResource):
- @cached_property
- def checkpoints(self) -> CheckpointsResource:
- return CheckpointsResource(self._client)
-
- @cached_property
- def jobs(self) -> JobsResource:
- return JobsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> FineTuningResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return FineTuningResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FineTuningResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return FineTuningResourceWithStreamingResponse(self)
-
-
-class AsyncFineTuningResource(AsyncAPIResource):
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResource:
- return AsyncCheckpointsResource(self._client)
-
- @cached_property
- def jobs(self) -> AsyncJobsResource:
- return AsyncJobsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncFineTuningResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncFineTuningResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFineTuningResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncFineTuningResourceWithStreamingResponse(self)
-
-
-class FineTuningResourceWithRawResponse:
- def __init__(self, fine_tuning: FineTuningResource) -> None:
- self._fine_tuning = fine_tuning
-
- @cached_property
- def checkpoints(self) -> CheckpointsResourceWithRawResponse:
- return CheckpointsResourceWithRawResponse(self._fine_tuning.checkpoints)
-
- @cached_property
- def jobs(self) -> JobsResourceWithRawResponse:
- return JobsResourceWithRawResponse(self._fine_tuning.jobs)
-
-
-class AsyncFineTuningResourceWithRawResponse:
- def __init__(self, fine_tuning: AsyncFineTuningResource) -> None:
- self._fine_tuning = fine_tuning
-
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResourceWithRawResponse:
- return AsyncCheckpointsResourceWithRawResponse(self._fine_tuning.checkpoints)
-
- @cached_property
- def jobs(self) -> AsyncJobsResourceWithRawResponse:
- return AsyncJobsResourceWithRawResponse(self._fine_tuning.jobs)
-
-
-class FineTuningResourceWithStreamingResponse:
- def __init__(self, fine_tuning: FineTuningResource) -> None:
- self._fine_tuning = fine_tuning
-
- @cached_property
- def checkpoints(self) -> CheckpointsResourceWithStreamingResponse:
- return CheckpointsResourceWithStreamingResponse(self._fine_tuning.checkpoints)
-
- @cached_property
- def jobs(self) -> JobsResourceWithStreamingResponse:
- return JobsResourceWithStreamingResponse(self._fine_tuning.jobs)
-
-
-class AsyncFineTuningResourceWithStreamingResponse:
- def __init__(self, fine_tuning: AsyncFineTuningResource) -> None:
- self._fine_tuning = fine_tuning
-
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResourceWithStreamingResponse:
- return AsyncCheckpointsResourceWithStreamingResponse(self._fine_tuning.checkpoints)
-
- @cached_property
- def jobs(self) -> AsyncJobsResourceWithStreamingResponse:
- return AsyncJobsResourceWithStreamingResponse(self._fine_tuning.jobs)
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py
deleted file mode 100644
index 90e643d7..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .jobs import (
- JobsResource,
- AsyncJobsResource,
- JobsResourceWithRawResponse,
- AsyncJobsResourceWithRawResponse,
- JobsResourceWithStreamingResponse,
- AsyncJobsResourceWithStreamingResponse,
-)
-from .events import (
- EventsResource,
- AsyncEventsResource,
- EventsResourceWithRawResponse,
- AsyncEventsResourceWithRawResponse,
- EventsResourceWithStreamingResponse,
- AsyncEventsResourceWithStreamingResponse,
-)
-from .checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "CheckpointsResource",
- "AsyncCheckpointsResource",
- "CheckpointsResourceWithRawResponse",
- "AsyncCheckpointsResourceWithRawResponse",
- "CheckpointsResourceWithStreamingResponse",
- "AsyncCheckpointsResourceWithStreamingResponse",
- "EventsResource",
- "AsyncEventsResource",
- "EventsResourceWithRawResponse",
- "AsyncEventsResourceWithRawResponse",
- "EventsResourceWithStreamingResponse",
- "AsyncEventsResourceWithStreamingResponse",
- "JobsResource",
- "AsyncJobsResource",
- "JobsResourceWithRawResponse",
- "AsyncJobsResourceWithRawResponse",
- "JobsResourceWithStreamingResponse",
- "AsyncJobsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py
deleted file mode 100644
index d9ade070..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.fine_tuning.jobs import checkpoint_retrieve_params
-from ....types.fine_tuning.jobs.checkpoint_retrieve_response import CheckpointRetrieveResponse
-
-__all__ = ["CheckpointsResource", "AsyncCheckpointsResource"]
-
-
-class CheckpointsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> CheckpointsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return CheckpointsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return CheckpointsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CheckpointRetrieveResponse:
- """
- List checkpoints for a fine-tuning job.
-
- Args:
- after: Identifier for the last checkpoint ID from the previous pagination request.
-
- limit: Number of checkpoints to retrieve.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- checkpoint_retrieve_params.CheckpointRetrieveParams,
- ),
- ),
- cast_to=CheckpointRetrieveResponse,
- )
-
-
-class AsyncCheckpointsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncCheckpointsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncCheckpointsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CheckpointRetrieveResponse:
- """
- List checkpoints for a fine-tuning job.
-
- Args:
- after: Identifier for the last checkpoint ID from the previous pagination request.
-
- limit: Number of checkpoints to retrieve.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return await self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- checkpoint_retrieve_params.CheckpointRetrieveParams,
- ),
- ),
- cast_to=CheckpointRetrieveResponse,
- )
-
-
-class CheckpointsResourceWithRawResponse:
- def __init__(self, checkpoints: CheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- self.retrieve = to_raw_response_wrapper(
- checkpoints.retrieve,
- )
-
-
-class AsyncCheckpointsResourceWithRawResponse:
- def __init__(self, checkpoints: AsyncCheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- self.retrieve = async_to_raw_response_wrapper(
- checkpoints.retrieve,
- )
-
-
-class CheckpointsResourceWithStreamingResponse:
- def __init__(self, checkpoints: CheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- self.retrieve = to_streamed_response_wrapper(
- checkpoints.retrieve,
- )
-
-
-class AsyncCheckpointsResourceWithStreamingResponse:
- def __init__(self, checkpoints: AsyncCheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- self.retrieve = async_to_streamed_response_wrapper(
- checkpoints.retrieve,
- )
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py
deleted file mode 100644
index 6005084f..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.fine_tuning.jobs import event_retrieve_params
-from ....types.fine_tuning.jobs.event_retrieve_response import EventRetrieveResponse
-
-__all__ = ["EventsResource", "AsyncEventsResource"]
-
-
-class EventsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> EventsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return EventsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> EventsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return EventsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> EventRetrieveResponse:
- """
- Get status updates for a fine-tuning job.
-
- Args:
- after: Identifier for the last event from the previous pagination request.
-
- limit: Number of events to retrieve.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- event_retrieve_params.EventRetrieveParams,
- ),
- ),
- cast_to=EventRetrieveResponse,
- )
-
-
-class AsyncEventsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncEventsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncEventsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncEventsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncEventsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> EventRetrieveResponse:
- """
- Get status updates for a fine-tuning job.
-
- Args:
- after: Identifier for the last event from the previous pagination request.
-
- limit: Number of events to retrieve.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return await self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- event_retrieve_params.EventRetrieveParams,
- ),
- ),
- cast_to=EventRetrieveResponse,
- )
-
-
-class EventsResourceWithRawResponse:
- def __init__(self, events: EventsResource) -> None:
- self._events = events
-
- self.retrieve = to_raw_response_wrapper(
- events.retrieve,
- )
-
-
-class AsyncEventsResourceWithRawResponse:
- def __init__(self, events: AsyncEventsResource) -> None:
- self._events = events
-
- self.retrieve = async_to_raw_response_wrapper(
- events.retrieve,
- )
-
-
-class EventsResourceWithStreamingResponse:
- def __init__(self, events: EventsResource) -> None:
- self._events = events
-
- self.retrieve = to_streamed_response_wrapper(
- events.retrieve,
- )
-
-
-class AsyncEventsResourceWithStreamingResponse:
- def __init__(self, events: AsyncEventsResource) -> None:
- self._events = events
-
- self.retrieve = async_to_streamed_response_wrapper(
- events.retrieve,
- )
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py
deleted file mode 100644
index 86a7ae4b..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py
+++ /dev/null
@@ -1,668 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from .events import (
- EventsResource,
- AsyncEventsResource,
- EventsResourceWithRawResponse,
- AsyncEventsResourceWithRawResponse,
- EventsResourceWithStreamingResponse,
- AsyncEventsResourceWithStreamingResponse,
-)
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from .checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.fine_tuning import job_list_params, job_create_params
-from ....types.fine_tuning.fine_tuning_job import FineTuningJob
-from ....types.fine_tuning.job_list_response import JobListResponse
-from ....types.fine_tuning.fine_tune_method_param import FineTuneMethodParam
-
-__all__ = ["JobsResource", "AsyncJobsResource"]
-
-
-class JobsResource(SyncAPIResource):
- @cached_property
- def checkpoints(self) -> CheckpointsResource:
- return CheckpointsResource(self._client)
-
- @cached_property
- def events(self) -> EventsResource:
- return EventsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> JobsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return JobsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> JobsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return JobsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
- training_file: str,
- hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
- integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- method: FineTuneMethodParam | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- validation_file: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Creates a fine-tuning job which begins the process of creating a new model from
- a given dataset.
-
- Response includes details of the enqueued job including job status and the name
- of the fine-tuned models once complete.
-
- [Learn more about fine-tuning](/docs/guides/fine-tuning)
-
- Args:
- model: The name of the model to fine-tune. You can select one of the
- [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
-
- training_file: The ID of an uploaded file that contains training data.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your dataset must be formatted as a JSONL file. Additionally, you must upload
- your file with the purpose `fine-tune`.
-
- The contents of the file should differ depending on if the model uses the
- [chat](/docs/api-reference/fine-tuning/chat-input),
- [completions](/docs/api-reference/fine-tuning/completions-input) format, or if
- the fine-tuning method uses the
- [preference](/docs/api-reference/fine-tuning/preference-input) format.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-
- hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
- in favor of `method`, and should be passed in under the `method` parameter.
-
- integrations: A list of integrations to enable for your fine-tuning job.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- method: The method used for fine-tuning.
-
- seed: The seed controls the reproducibility of the job. Passing in the same seed and
- job parameters should produce the same results, but may differ in rare cases. If
- a seed is not specified, one will be generated for you.
-
- suffix: A string of up to 64 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
-
- validation_file: The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the fine-tuning
- results file. The same data should not be present in both train and validation
- files.
-
- Your dataset must be formatted as a JSONL file. You must upload your file with
- the purpose `fine-tune`.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/fine_tuning/jobs",
- body=maybe_transform(
- {
- "model": model,
- "training_file": training_file,
- "hyperparameters": hyperparameters,
- "integrations": integrations,
- "metadata": metadata,
- "method": method,
- "seed": seed,
- "suffix": suffix,
- "validation_file": validation_file,
- },
- job_create_params.JobCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
- def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Get info about a fine-tuning job.
-
- [Learn more about fine-tuning](/docs/guides/fine-tuning)
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> JobListResponse:
- """
- List your organization's fine-tuning jobs
-
- Args:
- after: Identifier for the last job from the previous pagination request.
-
- limit: Number of fine-tuning jobs to retrieve.
-
- metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
- Alternatively, set `metadata=null` to indicate no metadata.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/fine_tuning/jobs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "metadata": metadata,
- },
- job_list_params.JobListParams,
- ),
- ),
- cast_to=JobListResponse,
- )
-
- def cancel(
- self,
- fine_tuning_job_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Immediately cancel a fine-tune job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
-
-class AsyncJobsResource(AsyncAPIResource):
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResource:
- return AsyncCheckpointsResource(self._client)
-
- @cached_property
- def events(self) -> AsyncEventsResource:
- return AsyncEventsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncJobsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncJobsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncJobsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
- training_file: str,
- hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
- integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- method: FineTuneMethodParam | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- validation_file: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Creates a fine-tuning job which begins the process of creating a new model from
- a given dataset.
-
- Response includes details of the enqueued job including job status and the name
- of the fine-tuned models once complete.
-
- [Learn more about fine-tuning](/docs/guides/fine-tuning)
-
- Args:
- model: The name of the model to fine-tune. You can select one of the
- [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
-
- training_file: The ID of an uploaded file that contains training data.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your dataset must be formatted as a JSONL file. Additionally, you must upload
- your file with the purpose `fine-tune`.
-
- The contents of the file should differ depending on if the model uses the
- [chat](/docs/api-reference/fine-tuning/chat-input),
- [completions](/docs/api-reference/fine-tuning/completions-input) format, or if
- the fine-tuning method uses the
- [preference](/docs/api-reference/fine-tuning/preference-input) format.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-
- hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
- in favor of `method`, and should be passed in under the `method` parameter.
-
- integrations: A list of integrations to enable for your fine-tuning job.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- method: The method used for fine-tuning.
-
- seed: The seed controls the reproducibility of the job. Passing in the same seed and
- job parameters should produce the same results, but may differ in rare cases. If
- a seed is not specified, one will be generated for you.
-
- suffix: A string of up to 64 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
-
- validation_file: The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the fine-tuning
- results file. The same data should not be present in both train and validation
- files.
-
- Your dataset must be formatted as a JSONL file. You must upload your file with
- the purpose `fine-tune`.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/fine_tuning/jobs",
- body=await async_maybe_transform(
- {
- "model": model,
- "training_file": training_file,
- "hyperparameters": hyperparameters,
- "integrations": integrations,
- "metadata": metadata,
- "method": method,
- "seed": seed,
- "suffix": suffix,
- "validation_file": validation_file,
- },
- job_create_params.JobCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
- async def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Get info about a fine-tuning job.
-
- [Learn more about fine-tuning](/docs/guides/fine-tuning)
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return await self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> JobListResponse:
- """
- List your organization's fine-tuning jobs
-
- Args:
- after: Identifier for the last job from the previous pagination request.
-
- limit: Number of fine-tuning jobs to retrieve.
-
- metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
- Alternatively, set `metadata=null` to indicate no metadata.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/fine_tuning/jobs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "metadata": metadata,
- },
- job_list_params.JobListParams,
- ),
- ),
- cast_to=JobListResponse,
- )
-
- async def cancel(
- self,
- fine_tuning_job_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Immediately cancel a fine-tune job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return await self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
-
-class JobsResourceWithRawResponse:
- def __init__(self, jobs: JobsResource) -> None:
- self._jobs = jobs
-
- self.create = to_raw_response_wrapper(
- jobs.create,
- )
- self.retrieve = to_raw_response_wrapper(
- jobs.retrieve,
- )
- self.list = to_raw_response_wrapper(
- jobs.list,
- )
- self.cancel = to_raw_response_wrapper(
- jobs.cancel,
- )
-
- @cached_property
- def checkpoints(self) -> CheckpointsResourceWithRawResponse:
- return CheckpointsResourceWithRawResponse(self._jobs.checkpoints)
-
- @cached_property
- def events(self) -> EventsResourceWithRawResponse:
- return EventsResourceWithRawResponse(self._jobs.events)
-
-
-class AsyncJobsResourceWithRawResponse:
- def __init__(self, jobs: AsyncJobsResource) -> None:
- self._jobs = jobs
-
- self.create = async_to_raw_response_wrapper(
- jobs.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- jobs.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- jobs.list,
- )
- self.cancel = async_to_raw_response_wrapper(
- jobs.cancel,
- )
-
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResourceWithRawResponse:
- return AsyncCheckpointsResourceWithRawResponse(self._jobs.checkpoints)
-
- @cached_property
- def events(self) -> AsyncEventsResourceWithRawResponse:
- return AsyncEventsResourceWithRawResponse(self._jobs.events)
-
-
-class JobsResourceWithStreamingResponse:
- def __init__(self, jobs: JobsResource) -> None:
- self._jobs = jobs
-
- self.create = to_streamed_response_wrapper(
- jobs.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- jobs.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- jobs.list,
- )
- self.cancel = to_streamed_response_wrapper(
- jobs.cancel,
- )
-
- @cached_property
- def checkpoints(self) -> CheckpointsResourceWithStreamingResponse:
- return CheckpointsResourceWithStreamingResponse(self._jobs.checkpoints)
-
- @cached_property
- def events(self) -> EventsResourceWithStreamingResponse:
- return EventsResourceWithStreamingResponse(self._jobs.events)
-
-
-class AsyncJobsResourceWithStreamingResponse:
- def __init__(self, jobs: AsyncJobsResource) -> None:
- self._jobs = jobs
-
- self.create = async_to_streamed_response_wrapper(
- jobs.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- jobs.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- jobs.list,
- )
- self.cancel = async_to_streamed_response_wrapper(
- jobs.cancel,
- )
-
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResourceWithStreamingResponse:
- return AsyncCheckpointsResourceWithStreamingResponse(self._jobs.checkpoints)
-
- @cached_property
- def events(self) -> AsyncEventsResourceWithStreamingResponse:
- return AsyncEventsResourceWithStreamingResponse(self._jobs.events)
diff --git a/src/digitalocean_genai_sdk/resources/images.py b/src/digitalocean_genai_sdk/resources/images.py
deleted file mode 100644
index 56a52184..00000000
--- a/src/digitalocean_genai_sdk/resources/images.py
+++ /dev/null
@@ -1,592 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Mapping, Optional, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import image_create_edit_params, image_create_variation_params, image_create_generation_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.images_response import ImagesResponse
-
-__all__ = ["ImagesResource", "AsyncImagesResource"]
-
-
-class ImagesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ImagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ImagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ImagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ImagesResourceWithStreamingResponse(self)
-
- def create_edit(
- self,
- *,
- image: FileTypes,
- prompt: str,
- mask: FileTypes | NotGiven = NOT_GIVEN,
- model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates an edited or extended image given an original image and a prompt.
-
- Args:
- image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
- is not provided, image must have transparency, which will be used as the mask.
-
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters.
-
- mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
- indicate where `image` should be edited. Must be a valid PNG file, less than
- 4MB, and have the same dimensions as `image`.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "prompt": prompt,
- "mask": mask,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/images/edits",
- body=maybe_transform(body, image_create_edit_params.ImageCreateEditParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
- def create_generation(
- self,
- *,
- prompt: str,
- model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
- style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates an image given a prompt.
-
- Args:
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters for `dall-e-2` and 4000 characters for `dall-e-3`.
-
- model: The model to use for image generation.
-
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
-
- quality: The quality of the image that will be generated. `hd` creates images with finer
- details and greater consistency across the image. This param is only supported
- for `dall-e-3`.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
- `1024x1792` for `dall-e-3` models.
-
- style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
- causes the model to lean towards generating hyper-real and dramatic images.
- Natural causes the model to produce more natural, less hyper-real looking
- images. This param is only supported for `dall-e-3`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/images/generations",
- body=maybe_transform(
- {
- "prompt": prompt,
- "model": model,
- "n": n,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "style": style,
- "user": user,
- },
- image_create_generation_params.ImageCreateGenerationParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
- def create_variation(
- self,
- *,
- image: FileTypes,
- model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates a variation of a given image.
-
- Args:
- image: The image to use as the basis for the variation(s). Must be a valid PNG file,
- less than 4MB, and square.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/images/variations",
- body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
-
-class AsyncImagesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncImagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncImagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncImagesResourceWithStreamingResponse(self)
-
- async def create_edit(
- self,
- *,
- image: FileTypes,
- prompt: str,
- mask: FileTypes | NotGiven = NOT_GIVEN,
- model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates an edited or extended image given an original image and a prompt.
-
- Args:
- image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
- is not provided, image must have transparency, which will be used as the mask.
-
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters.
-
- mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
- indicate where `image` should be edited. Must be a valid PNG file, less than
- 4MB, and have the same dimensions as `image`.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "prompt": prompt,
- "mask": mask,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/images/edits",
- body=await async_maybe_transform(body, image_create_edit_params.ImageCreateEditParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
- async def create_generation(
- self,
- *,
- prompt: str,
- model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
- style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates an image given a prompt.
-
- Args:
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters for `dall-e-2` and 4000 characters for `dall-e-3`.
-
- model: The model to use for image generation.
-
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
-
- quality: The quality of the image that will be generated. `hd` creates images with finer
- details and greater consistency across the image. This param is only supported
- for `dall-e-3`.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
- `1024x1792` for `dall-e-3` models.
-
- style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
- causes the model to lean towards generating hyper-real and dramatic images.
- Natural causes the model to produce more natural, less hyper-real looking
- images. This param is only supported for `dall-e-3`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/images/generations",
- body=await async_maybe_transform(
- {
- "prompt": prompt,
- "model": model,
- "n": n,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "style": style,
- "user": user,
- },
- image_create_generation_params.ImageCreateGenerationParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
- async def create_variation(
- self,
- *,
- image: FileTypes,
- model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates a variation of a given image.
-
- Args:
- image: The image to use as the basis for the variation(s). Must be a valid PNG file,
- less than 4MB, and square.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/images/variations",
- body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
-
-class ImagesResourceWithRawResponse:
- def __init__(self, images: ImagesResource) -> None:
- self._images = images
-
- self.create_edit = to_raw_response_wrapper(
- images.create_edit,
- )
- self.create_generation = to_raw_response_wrapper(
- images.create_generation,
- )
- self.create_variation = to_raw_response_wrapper(
- images.create_variation,
- )
-
-
-class AsyncImagesResourceWithRawResponse:
- def __init__(self, images: AsyncImagesResource) -> None:
- self._images = images
-
- self.create_edit = async_to_raw_response_wrapper(
- images.create_edit,
- )
- self.create_generation = async_to_raw_response_wrapper(
- images.create_generation,
- )
- self.create_variation = async_to_raw_response_wrapper(
- images.create_variation,
- )
-
-
-class ImagesResourceWithStreamingResponse:
- def __init__(self, images: ImagesResource) -> None:
- self._images = images
-
- self.create_edit = to_streamed_response_wrapper(
- images.create_edit,
- )
- self.create_generation = to_streamed_response_wrapper(
- images.create_generation,
- )
- self.create_variation = to_streamed_response_wrapper(
- images.create_variation,
- )
-
-
-class AsyncImagesResourceWithStreamingResponse:
- def __init__(self, images: AsyncImagesResource) -> None:
- self._images = images
-
- self.create_edit = async_to_streamed_response_wrapper(
- images.create_edit,
- )
- self.create_generation = async_to_streamed_response_wrapper(
- images.create_generation,
- )
- self.create_variation = async_to_streamed_response_wrapper(
- images.create_variation,
- )
diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/digitalocean_genai_sdk/resources/models.py
index 53775057..81b75441 100644
--- a/src/digitalocean_genai_sdk/resources/models.py
+++ b/src/digitalocean_genai_sdk/resources/models.py
@@ -16,7 +16,6 @@
from ..types.model import Model
from .._base_client import make_request_options
from ..types.model_list_response import ModelListResponse
-from ..types.model_delete_response import ModelDeleteResponse
__all__ = ["ModelsResource", "AsyncModelsResource"]
@@ -28,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
"""
return ModelsResourceWithRawResponse(self)
@@ -37,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+ For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
"""
return ModelsResourceWithStreamingResponse(self)
@@ -97,41 +96,6 @@ def list(
cast_to=ModelListResponse,
)
- def delete(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelDeleteResponse:
- """Delete a fine-tuned model.
-
- You must have the Owner role in your organization to
- delete a model.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return self._delete(
- f"/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelDeleteResponse,
- )
-
class AsyncModelsResource(AsyncAPIResource):
@cached_property
@@ -140,7 +104,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
"""
return AsyncModelsResourceWithRawResponse(self)
@@ -149,7 +113,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+ For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
"""
return AsyncModelsResourceWithStreamingResponse(self)
@@ -209,41 +173,6 @@ async def list(
cast_to=ModelListResponse,
)
- async def delete(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelDeleteResponse:
- """Delete a fine-tuned model.
-
- You must have the Owner role in your organization to
- delete a model.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return await self._delete(
- f"/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelDeleteResponse,
- )
-
class ModelsResourceWithRawResponse:
def __init__(self, models: ModelsResource) -> None:
@@ -255,9 +184,6 @@ def __init__(self, models: ModelsResource) -> None:
self.list = to_raw_response_wrapper(
models.list,
)
- self.delete = to_raw_response_wrapper(
- models.delete,
- )
class AsyncModelsResourceWithRawResponse:
@@ -270,9 +196,6 @@ def __init__(self, models: AsyncModelsResource) -> None:
self.list = async_to_raw_response_wrapper(
models.list,
)
- self.delete = async_to_raw_response_wrapper(
- models.delete,
- )
class ModelsResourceWithStreamingResponse:
@@ -285,9 +208,6 @@ def __init__(self, models: ModelsResource) -> None:
self.list = to_streamed_response_wrapper(
models.list,
)
- self.delete = to_streamed_response_wrapper(
- models.delete,
- )
class AsyncModelsResourceWithStreamingResponse:
@@ -300,6 +220,3 @@ def __init__(self, models: AsyncModelsResource) -> None:
self.list = async_to_streamed_response_wrapper(
models.list,
)
- self.delete = async_to_streamed_response_wrapper(
- models.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/moderations.py b/src/digitalocean_genai_sdk/resources/moderations.py
deleted file mode 100644
index e9404243..00000000
--- a/src/digitalocean_genai_sdk/resources/moderations.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import moderation_classify_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.moderation_classify_response import ModerationClassifyResponse
-
-__all__ = ["ModerationsResource", "AsyncModerationsResource"]
-
-
-class ModerationsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ModerationsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ModerationsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ModerationsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ModerationsResourceWithStreamingResponse(self)
-
- def classify(
- self,
- *,
- input: Union[str, List[str], Iterable[moderation_classify_params.InputUnionMember2]],
- model: Union[
- str,
- Literal[
- "omni-moderation-latest",
- "omni-moderation-2024-09-26",
- "text-moderation-latest",
- "text-moderation-stable",
- ],
- ]
- | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModerationClassifyResponse:
- """Classifies if text and/or image inputs are potentially harmful.
-
- Learn more in
- the [moderation guide](/docs/guides/moderation).
-
- Args:
- input: Input (or inputs) to classify. Can be a single string, an array of strings, or
- an array of multi-modal input objects similar to other models.
-
- model: The content moderation model you would like to use. Learn more in
- [the moderation guide](/docs/guides/moderation), and learn about available
- models [here](/docs/models#moderation).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/moderations",
- body=maybe_transform(
- {
- "input": input,
- "model": model,
- },
- moderation_classify_params.ModerationClassifyParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModerationClassifyResponse,
- )
-
-
-class AsyncModerationsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncModerationsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncModerationsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncModerationsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncModerationsResourceWithStreamingResponse(self)
-
- async def classify(
- self,
- *,
- input: Union[str, List[str], Iterable[moderation_classify_params.InputUnionMember2]],
- model: Union[
- str,
- Literal[
- "omni-moderation-latest",
- "omni-moderation-2024-09-26",
- "text-moderation-latest",
- "text-moderation-stable",
- ],
- ]
- | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModerationClassifyResponse:
- """Classifies if text and/or image inputs are potentially harmful.
-
- Learn more in
- the [moderation guide](/docs/guides/moderation).
-
- Args:
- input: Input (or inputs) to classify. Can be a single string, an array of strings, or
- an array of multi-modal input objects similar to other models.
-
- model: The content moderation model you would like to use. Learn more in
- [the moderation guide](/docs/guides/moderation), and learn about available
- models [here](/docs/models#moderation).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/moderations",
- body=await async_maybe_transform(
- {
- "input": input,
- "model": model,
- },
- moderation_classify_params.ModerationClassifyParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModerationClassifyResponse,
- )
-
-
-class ModerationsResourceWithRawResponse:
- def __init__(self, moderations: ModerationsResource) -> None:
- self._moderations = moderations
-
- self.classify = to_raw_response_wrapper(
- moderations.classify,
- )
-
-
-class AsyncModerationsResourceWithRawResponse:
- def __init__(self, moderations: AsyncModerationsResource) -> None:
- self._moderations = moderations
-
- self.classify = async_to_raw_response_wrapper(
- moderations.classify,
- )
-
-
-class ModerationsResourceWithStreamingResponse:
- def __init__(self, moderations: ModerationsResource) -> None:
- self._moderations = moderations
-
- self.classify = to_streamed_response_wrapper(
- moderations.classify,
- )
-
-
-class AsyncModerationsResourceWithStreamingResponse:
- def __init__(self, moderations: AsyncModerationsResource) -> None:
- self._moderations = moderations
-
- self.classify = async_to_streamed_response_wrapper(
- moderations.classify,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/__init__.py b/src/digitalocean_genai_sdk/resources/organization/__init__.py
deleted file mode 100644
index cf206d71..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/__init__.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .usage import (
- UsageResource,
- AsyncUsageResource,
- UsageResourceWithRawResponse,
- AsyncUsageResourceWithRawResponse,
- UsageResourceWithStreamingResponse,
- AsyncUsageResourceWithStreamingResponse,
-)
-from .users import (
- UsersResource,
- AsyncUsersResource,
- UsersResourceWithRawResponse,
- AsyncUsersResourceWithRawResponse,
- UsersResourceWithStreamingResponse,
- AsyncUsersResourceWithStreamingResponse,
-)
-from .invites import (
- InvitesResource,
- AsyncInvitesResource,
- InvitesResourceWithRawResponse,
- AsyncInvitesResourceWithRawResponse,
- InvitesResourceWithStreamingResponse,
- AsyncInvitesResourceWithStreamingResponse,
-)
-from .projects import (
- ProjectsResource,
- AsyncProjectsResource,
- ProjectsResourceWithRawResponse,
- AsyncProjectsResourceWithRawResponse,
- ProjectsResourceWithStreamingResponse,
- AsyncProjectsResourceWithStreamingResponse,
-)
-from .organization import (
- OrganizationResource,
- AsyncOrganizationResource,
- OrganizationResourceWithRawResponse,
- AsyncOrganizationResourceWithRawResponse,
- OrganizationResourceWithStreamingResponse,
- AsyncOrganizationResourceWithStreamingResponse,
-)
-from .admin_api_keys import (
- AdminAPIKeysResource,
- AsyncAdminAPIKeysResource,
- AdminAPIKeysResourceWithRawResponse,
- AsyncAdminAPIKeysResourceWithRawResponse,
- AdminAPIKeysResourceWithStreamingResponse,
- AsyncAdminAPIKeysResourceWithStreamingResponse,
-)
-
-__all__ = [
- "AdminAPIKeysResource",
- "AsyncAdminAPIKeysResource",
- "AdminAPIKeysResourceWithRawResponse",
- "AsyncAdminAPIKeysResourceWithRawResponse",
- "AdminAPIKeysResourceWithStreamingResponse",
- "AsyncAdminAPIKeysResourceWithStreamingResponse",
- "InvitesResource",
- "AsyncInvitesResource",
- "InvitesResourceWithRawResponse",
- "AsyncInvitesResourceWithRawResponse",
- "InvitesResourceWithStreamingResponse",
- "AsyncInvitesResourceWithStreamingResponse",
- "ProjectsResource",
- "AsyncProjectsResource",
- "ProjectsResourceWithRawResponse",
- "AsyncProjectsResourceWithRawResponse",
- "ProjectsResourceWithStreamingResponse",
- "AsyncProjectsResourceWithStreamingResponse",
- "UsageResource",
- "AsyncUsageResource",
- "UsageResourceWithRawResponse",
- "AsyncUsageResourceWithRawResponse",
- "UsageResourceWithStreamingResponse",
- "AsyncUsageResourceWithStreamingResponse",
- "UsersResource",
- "AsyncUsersResource",
- "UsersResourceWithRawResponse",
- "AsyncUsersResourceWithRawResponse",
- "UsersResourceWithStreamingResponse",
- "AsyncUsersResourceWithStreamingResponse",
- "OrganizationResource",
- "AsyncOrganizationResource",
- "OrganizationResourceWithRawResponse",
- "AsyncOrganizationResourceWithRawResponse",
- "OrganizationResourceWithStreamingResponse",
- "AsyncOrganizationResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py b/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py
deleted file mode 100644
index 7224871f..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py
+++ /dev/null
@@ -1,444 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.organization import admin_api_key_list_params, admin_api_key_create_params
-from ...types.organization.admin_api_key import AdminAPIKey
-from ...types.organization.admin_api_key_list_response import AdminAPIKeyListResponse
-from ...types.organization.admin_api_key_delete_response import AdminAPIKeyDeleteResponse
-
-__all__ = ["AdminAPIKeysResource", "AsyncAdminAPIKeysResource"]
-
-
-class AdminAPIKeysResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AdminAPIKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AdminAPIKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AdminAPIKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AdminAPIKeysResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKey:
- """
- Create a new admin-level API key for the organization.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/organization/admin_api_keys",
- body=maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKey,
- )
-
- def retrieve(
- self,
- key_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKey:
- """
- Get details for a specific organization API key by its ID.
-
- Args:
- key_id: The ID of the API key.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return self._get(
- f"/organization/admin_api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKey,
- )
-
- def list(
- self,
- *,
- after: Optional[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKeyListResponse:
- """
- Retrieve a paginated list of organization admin API keys.
-
- Args:
- after: Return keys with IDs that come after this ID in the pagination order.
-
- limit: Maximum number of keys to return.
-
- order: Order results by creation time, ascending or descending.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/admin_api_keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- },
- admin_api_key_list_params.AdminAPIKeyListParams,
- ),
- ),
- cast_to=AdminAPIKeyListResponse,
- )
-
- def delete(
- self,
- key_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKeyDeleteResponse:
- """
- Delete the specified admin API key.
-
- Args:
- key_id: The ID of the API key to be deleted.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return self._delete(
- f"/organization/admin_api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKeyDeleteResponse,
- )
-
-
-class AsyncAdminAPIKeysResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAdminAPIKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAdminAPIKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAdminAPIKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncAdminAPIKeysResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKey:
- """
- Create a new admin-level API key for the organization.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/organization/admin_api_keys",
- body=await async_maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKey,
- )
-
- async def retrieve(
- self,
- key_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKey:
- """
- Get details for a specific organization API key by its ID.
-
- Args:
- key_id: The ID of the API key.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return await self._get(
- f"/organization/admin_api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKey,
- )
-
- async def list(
- self,
- *,
- after: Optional[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKeyListResponse:
- """
- Retrieve a paginated list of organization admin API keys.
-
- Args:
- after: Return keys with IDs that come after this ID in the pagination order.
-
- limit: Maximum number of keys to return.
-
- order: Order results by creation time, ascending or descending.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/admin_api_keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- },
- admin_api_key_list_params.AdminAPIKeyListParams,
- ),
- ),
- cast_to=AdminAPIKeyListResponse,
- )
-
- async def delete(
- self,
- key_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKeyDeleteResponse:
- """
- Delete the specified admin API key.
-
- Args:
- key_id: The ID of the API key to be deleted.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return await self._delete(
- f"/organization/admin_api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKeyDeleteResponse,
- )
-
-
-class AdminAPIKeysResourceWithRawResponse:
- def __init__(self, admin_api_keys: AdminAPIKeysResource) -> None:
- self._admin_api_keys = admin_api_keys
-
- self.create = to_raw_response_wrapper(
- admin_api_keys.create,
- )
- self.retrieve = to_raw_response_wrapper(
- admin_api_keys.retrieve,
- )
- self.list = to_raw_response_wrapper(
- admin_api_keys.list,
- )
- self.delete = to_raw_response_wrapper(
- admin_api_keys.delete,
- )
-
-
-class AsyncAdminAPIKeysResourceWithRawResponse:
- def __init__(self, admin_api_keys: AsyncAdminAPIKeysResource) -> None:
- self._admin_api_keys = admin_api_keys
-
- self.create = async_to_raw_response_wrapper(
- admin_api_keys.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- admin_api_keys.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- admin_api_keys.list,
- )
- self.delete = async_to_raw_response_wrapper(
- admin_api_keys.delete,
- )
-
-
-class AdminAPIKeysResourceWithStreamingResponse:
- def __init__(self, admin_api_keys: AdminAPIKeysResource) -> None:
- self._admin_api_keys = admin_api_keys
-
- self.create = to_streamed_response_wrapper(
- admin_api_keys.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- admin_api_keys.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- admin_api_keys.list,
- )
- self.delete = to_streamed_response_wrapper(
- admin_api_keys.delete,
- )
-
-
-class AsyncAdminAPIKeysResourceWithStreamingResponse:
- def __init__(self, admin_api_keys: AsyncAdminAPIKeysResource) -> None:
- self._admin_api_keys = admin_api_keys
-
- self.create = async_to_streamed_response_wrapper(
- admin_api_keys.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- admin_api_keys.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- admin_api_keys.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- admin_api_keys.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/invites.py b/src/digitalocean_genai_sdk/resources/organization/invites.py
deleted file mode 100644
index 16bd17bc..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/invites.py
+++ /dev/null
@@ -1,476 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.organization import invite_list_params, invite_create_params
-from ...types.organization.invite import Invite
-from ...types.organization.invite_list_response import InviteListResponse
-from ...types.organization.invite_delete_response import InviteDeleteResponse
-
-__all__ = ["InvitesResource", "AsyncInvitesResource"]
-
-
-class InvitesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> InvitesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return InvitesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> InvitesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return InvitesResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- email: str,
- role: Literal["reader", "owner"],
- projects: Iterable[invite_create_params.Project] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Invite:
- """Create an invite for a user to the organization.
-
- The invite must be accepted by
- the user before they have access to the organization.
-
- Args:
- email: Send an email to this address
-
- role: `owner` or `reader`
-
- projects: An array of projects to which membership is granted at the same time the org
- invite is accepted. If omitted, the user will be invited to the default project
- for compatibility with legacy behavior.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/organization/invites",
- body=maybe_transform(
- {
- "email": email,
- "role": role,
- "projects": projects,
- },
- invite_create_params.InviteCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Invite,
- )
-
- def retrieve(
- self,
- invite_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Invite:
- """
- Retrieves an invite.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not invite_id:
- raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
- return self._get(
- f"/organization/invites/{invite_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Invite,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> InviteListResponse:
- """
- Returns a list of invites in the organization.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/invites",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- invite_list_params.InviteListParams,
- ),
- ),
- cast_to=InviteListResponse,
- )
-
- def delete(
- self,
- invite_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> InviteDeleteResponse:
- """Delete an invite.
-
- If the invite has already been accepted, it cannot be deleted.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not invite_id:
- raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
- return self._delete(
- f"/organization/invites/{invite_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=InviteDeleteResponse,
- )
-
-
-class AsyncInvitesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncInvitesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncInvitesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncInvitesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncInvitesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- email: str,
- role: Literal["reader", "owner"],
- projects: Iterable[invite_create_params.Project] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Invite:
- """Create an invite for a user to the organization.
-
- The invite must be accepted by
- the user before they have access to the organization.
-
- Args:
- email: Send an email to this address
-
- role: `owner` or `reader`
-
- projects: An array of projects to which membership is granted at the same time the org
- invite is accepted. If omitted, the user will be invited to the default project
- for compatibility with legacy behavior.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/organization/invites",
- body=await async_maybe_transform(
- {
- "email": email,
- "role": role,
- "projects": projects,
- },
- invite_create_params.InviteCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Invite,
- )
-
- async def retrieve(
- self,
- invite_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Invite:
- """
- Retrieves an invite.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not invite_id:
- raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
- return await self._get(
- f"/organization/invites/{invite_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Invite,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> InviteListResponse:
- """
- Returns a list of invites in the organization.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/invites",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- invite_list_params.InviteListParams,
- ),
- ),
- cast_to=InviteListResponse,
- )
-
- async def delete(
- self,
- invite_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> InviteDeleteResponse:
- """Delete an invite.
-
- If the invite has already been accepted, it cannot be deleted.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not invite_id:
- raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
- return await self._delete(
- f"/organization/invites/{invite_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=InviteDeleteResponse,
- )
-
-
-class InvitesResourceWithRawResponse:
- def __init__(self, invites: InvitesResource) -> None:
- self._invites = invites
-
- self.create = to_raw_response_wrapper(
- invites.create,
- )
- self.retrieve = to_raw_response_wrapper(
- invites.retrieve,
- )
- self.list = to_raw_response_wrapper(
- invites.list,
- )
- self.delete = to_raw_response_wrapper(
- invites.delete,
- )
-
-
-class AsyncInvitesResourceWithRawResponse:
- def __init__(self, invites: AsyncInvitesResource) -> None:
- self._invites = invites
-
- self.create = async_to_raw_response_wrapper(
- invites.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- invites.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- invites.list,
- )
- self.delete = async_to_raw_response_wrapper(
- invites.delete,
- )
-
-
-class InvitesResourceWithStreamingResponse:
- def __init__(self, invites: InvitesResource) -> None:
- self._invites = invites
-
- self.create = to_streamed_response_wrapper(
- invites.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- invites.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- invites.list,
- )
- self.delete = to_streamed_response_wrapper(
- invites.delete,
- )
-
-
-class AsyncInvitesResourceWithStreamingResponse:
- def __init__(self, invites: AsyncInvitesResource) -> None:
- self._invites = invites
-
- self.create = async_to_streamed_response_wrapper(
- invites.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- invites.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- invites.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- invites.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/organization.py b/src/digitalocean_genai_sdk/resources/organization/organization.py
deleted file mode 100644
index 4a9aa4fb..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/organization.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from .usage import (
- UsageResource,
- AsyncUsageResource,
- UsageResourceWithRawResponse,
- AsyncUsageResourceWithRawResponse,
- UsageResourceWithStreamingResponse,
- AsyncUsageResourceWithStreamingResponse,
-)
-from .users import (
- UsersResource,
- AsyncUsersResource,
- UsersResourceWithRawResponse,
- AsyncUsersResourceWithRawResponse,
- UsersResourceWithStreamingResponse,
- AsyncUsersResourceWithStreamingResponse,
-)
-from ...types import organization_get_costs_params, organization_list_audit_logs_params
-from .invites import (
- InvitesResource,
- AsyncInvitesResource,
- InvitesResourceWithRawResponse,
- AsyncInvitesResourceWithRawResponse,
- InvitesResourceWithStreamingResponse,
- AsyncInvitesResourceWithStreamingResponse,
-)
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from .admin_api_keys import (
- AdminAPIKeysResource,
- AsyncAdminAPIKeysResource,
- AdminAPIKeysResourceWithRawResponse,
- AsyncAdminAPIKeysResourceWithRawResponse,
- AdminAPIKeysResourceWithStreamingResponse,
- AsyncAdminAPIKeysResourceWithStreamingResponse,
-)
-from .projects.projects import (
- ProjectsResource,
- AsyncProjectsResource,
- ProjectsResourceWithRawResponse,
- AsyncProjectsResourceWithRawResponse,
- ProjectsResourceWithStreamingResponse,
- AsyncProjectsResourceWithStreamingResponse,
-)
-from ...types.usage_response import UsageResponse
-from ...types.audit_log_event_type import AuditLogEventType
-from ...types.organization_list_audit_logs_response import OrganizationListAuditLogsResponse
-
-__all__ = ["OrganizationResource", "AsyncOrganizationResource"]
-
-
-class OrganizationResource(SyncAPIResource):
- @cached_property
- def admin_api_keys(self) -> AdminAPIKeysResource:
- return AdminAPIKeysResource(self._client)
-
- @cached_property
- def invites(self) -> InvitesResource:
- return InvitesResource(self._client)
-
- @cached_property
- def projects(self) -> ProjectsResource:
- return ProjectsResource(self._client)
-
- @cached_property
- def usage(self) -> UsageResource:
- return UsageResource(self._client)
-
- @cached_property
- def users(self) -> UsersResource:
- return UsersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> OrganizationResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return OrganizationResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> OrganizationResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return OrganizationResourceWithStreamingResponse(self)
-
- def get_costs(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "line_item"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get costs details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently only `1d` is supported, default
- to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the costs by the specified fields. Support fields include `project_id`,
- `line_item` and any combination of them.
-
- limit: A limit on the number of buckets to be returned. Limit can range between 1 and
- 180, and the default is 7.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only costs for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/costs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- organization_get_costs_params.OrganizationGetCostsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def list_audit_logs(
- self,
- *,
- actor_emails: List[str] | NotGiven = NOT_GIVEN,
- actor_ids: List[str] | NotGiven = NOT_GIVEN,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- effective_at: organization_list_audit_logs_params.EffectiveAt | NotGiven = NOT_GIVEN,
- event_types: List[AuditLogEventType] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- resource_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationListAuditLogsResponse:
- """
- List user actions and configuration changes within this organization.
-
- Args:
- actor_emails: Return only events performed by users with these emails.
-
- actor_ids: Return only events performed by these actors. Can be a user ID, a service
- account ID, or an api key tracking ID.
-
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- effective_at: Return only events whose `effective_at` (Unix seconds) is in this range.
-
- event_types: Return only events with a `type` in one of these values. For example,
- `project.created`. For all options, see the documentation for the
- [audit log object](/docs/api-reference/audit-logs/object).
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- project_ids: Return only events for these projects.
-
- resource_ids: Return only events performed on these targets. For example, a project ID
- updated.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/audit_logs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "actor_emails": actor_emails,
- "actor_ids": actor_ids,
- "after": after,
- "before": before,
- "effective_at": effective_at,
- "event_types": event_types,
- "limit": limit,
- "project_ids": project_ids,
- "resource_ids": resource_ids,
- },
- organization_list_audit_logs_params.OrganizationListAuditLogsParams,
- ),
- ),
- cast_to=OrganizationListAuditLogsResponse,
- )
-
-
-class AsyncOrganizationResource(AsyncAPIResource):
- @cached_property
- def admin_api_keys(self) -> AsyncAdminAPIKeysResource:
- return AsyncAdminAPIKeysResource(self._client)
-
- @cached_property
- def invites(self) -> AsyncInvitesResource:
- return AsyncInvitesResource(self._client)
-
- @cached_property
- def projects(self) -> AsyncProjectsResource:
- return AsyncProjectsResource(self._client)
-
- @cached_property
- def usage(self) -> AsyncUsageResource:
- return AsyncUsageResource(self._client)
-
- @cached_property
- def users(self) -> AsyncUsersResource:
- return AsyncUsersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncOrganizationResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncOrganizationResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncOrganizationResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncOrganizationResourceWithStreamingResponse(self)
-
- async def get_costs(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "line_item"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get costs details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently only `1d` is supported, default
- to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the costs by the specified fields. Support fields include `project_id`,
- `line_item` and any combination of them.
-
- limit: A limit on the number of buckets to be returned. Limit can range between 1 and
- 180, and the default is 7.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only costs for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/costs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- organization_get_costs_params.OrganizationGetCostsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def list_audit_logs(
- self,
- *,
- actor_emails: List[str] | NotGiven = NOT_GIVEN,
- actor_ids: List[str] | NotGiven = NOT_GIVEN,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- effective_at: organization_list_audit_logs_params.EffectiveAt | NotGiven = NOT_GIVEN,
- event_types: List[AuditLogEventType] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- resource_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationListAuditLogsResponse:
- """
- List user actions and configuration changes within this organization.
-
- Args:
- actor_emails: Return only events performed by users with these emails.
-
- actor_ids: Return only events performed by these actors. Can be a user ID, a service
- account ID, or an api key tracking ID.
-
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- effective_at: Return only events whose `effective_at` (Unix seconds) is in this range.
-
- event_types: Return only events with a `type` in one of these values. For example,
- `project.created`. For all options, see the documentation for the
- [audit log object](/docs/api-reference/audit-logs/object).
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- project_ids: Return only events for these projects.
-
- resource_ids: Return only events performed on these targets. For example, a project ID
- updated.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/audit_logs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "actor_emails": actor_emails,
- "actor_ids": actor_ids,
- "after": after,
- "before": before,
- "effective_at": effective_at,
- "event_types": event_types,
- "limit": limit,
- "project_ids": project_ids,
- "resource_ids": resource_ids,
- },
- organization_list_audit_logs_params.OrganizationListAuditLogsParams,
- ),
- ),
- cast_to=OrganizationListAuditLogsResponse,
- )
-
-
-class OrganizationResourceWithRawResponse:
- def __init__(self, organization: OrganizationResource) -> None:
- self._organization = organization
-
- self.get_costs = to_raw_response_wrapper(
- organization.get_costs,
- )
- self.list_audit_logs = to_raw_response_wrapper(
- organization.list_audit_logs,
- )
-
- @cached_property
- def admin_api_keys(self) -> AdminAPIKeysResourceWithRawResponse:
- return AdminAPIKeysResourceWithRawResponse(self._organization.admin_api_keys)
-
- @cached_property
- def invites(self) -> InvitesResourceWithRawResponse:
- return InvitesResourceWithRawResponse(self._organization.invites)
-
- @cached_property
- def projects(self) -> ProjectsResourceWithRawResponse:
- return ProjectsResourceWithRawResponse(self._organization.projects)
-
- @cached_property
- def usage(self) -> UsageResourceWithRawResponse:
- return UsageResourceWithRawResponse(self._organization.usage)
-
- @cached_property
- def users(self) -> UsersResourceWithRawResponse:
- return UsersResourceWithRawResponse(self._organization.users)
-
-
-class AsyncOrganizationResourceWithRawResponse:
- def __init__(self, organization: AsyncOrganizationResource) -> None:
- self._organization = organization
-
- self.get_costs = async_to_raw_response_wrapper(
- organization.get_costs,
- )
- self.list_audit_logs = async_to_raw_response_wrapper(
- organization.list_audit_logs,
- )
-
- @cached_property
- def admin_api_keys(self) -> AsyncAdminAPIKeysResourceWithRawResponse:
- return AsyncAdminAPIKeysResourceWithRawResponse(self._organization.admin_api_keys)
-
- @cached_property
- def invites(self) -> AsyncInvitesResourceWithRawResponse:
- return AsyncInvitesResourceWithRawResponse(self._organization.invites)
-
- @cached_property
- def projects(self) -> AsyncProjectsResourceWithRawResponse:
- return AsyncProjectsResourceWithRawResponse(self._organization.projects)
-
- @cached_property
- def usage(self) -> AsyncUsageResourceWithRawResponse:
- return AsyncUsageResourceWithRawResponse(self._organization.usage)
-
- @cached_property
- def users(self) -> AsyncUsersResourceWithRawResponse:
- return AsyncUsersResourceWithRawResponse(self._organization.users)
-
-
-class OrganizationResourceWithStreamingResponse:
- def __init__(self, organization: OrganizationResource) -> None:
- self._organization = organization
-
- self.get_costs = to_streamed_response_wrapper(
- organization.get_costs,
- )
- self.list_audit_logs = to_streamed_response_wrapper(
- organization.list_audit_logs,
- )
-
- @cached_property
- def admin_api_keys(self) -> AdminAPIKeysResourceWithStreamingResponse:
- return AdminAPIKeysResourceWithStreamingResponse(self._organization.admin_api_keys)
-
- @cached_property
- def invites(self) -> InvitesResourceWithStreamingResponse:
- return InvitesResourceWithStreamingResponse(self._organization.invites)
-
- @cached_property
- def projects(self) -> ProjectsResourceWithStreamingResponse:
- return ProjectsResourceWithStreamingResponse(self._organization.projects)
-
- @cached_property
- def usage(self) -> UsageResourceWithStreamingResponse:
- return UsageResourceWithStreamingResponse(self._organization.usage)
-
- @cached_property
- def users(self) -> UsersResourceWithStreamingResponse:
- return UsersResourceWithStreamingResponse(self._organization.users)
-
-
-class AsyncOrganizationResourceWithStreamingResponse:
- def __init__(self, organization: AsyncOrganizationResource) -> None:
- self._organization = organization
-
- self.get_costs = async_to_streamed_response_wrapper(
- organization.get_costs,
- )
- self.list_audit_logs = async_to_streamed_response_wrapper(
- organization.list_audit_logs,
- )
-
- @cached_property
- def admin_api_keys(self) -> AsyncAdminAPIKeysResourceWithStreamingResponse:
- return AsyncAdminAPIKeysResourceWithStreamingResponse(self._organization.admin_api_keys)
-
- @cached_property
- def invites(self) -> AsyncInvitesResourceWithStreamingResponse:
- return AsyncInvitesResourceWithStreamingResponse(self._organization.invites)
-
- @cached_property
- def projects(self) -> AsyncProjectsResourceWithStreamingResponse:
- return AsyncProjectsResourceWithStreamingResponse(self._organization.projects)
-
- @cached_property
- def usage(self) -> AsyncUsageResourceWithStreamingResponse:
- return AsyncUsageResourceWithStreamingResponse(self._organization.usage)
-
- @cached_property
- def users(self) -> AsyncUsersResourceWithStreamingResponse:
- return AsyncUsersResourceWithStreamingResponse(self._organization.users)
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py b/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py
deleted file mode 100644
index f3ceec3b..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .users import (
- UsersResource,
- AsyncUsersResource,
- UsersResourceWithRawResponse,
- AsyncUsersResourceWithRawResponse,
- UsersResourceWithStreamingResponse,
- AsyncUsersResourceWithStreamingResponse,
-)
-from .api_keys import (
- APIKeysResource,
- AsyncAPIKeysResource,
- APIKeysResourceWithRawResponse,
- AsyncAPIKeysResourceWithRawResponse,
- APIKeysResourceWithStreamingResponse,
- AsyncAPIKeysResourceWithStreamingResponse,
-)
-from .projects import (
- ProjectsResource,
- AsyncProjectsResource,
- ProjectsResourceWithRawResponse,
- AsyncProjectsResourceWithRawResponse,
- ProjectsResourceWithStreamingResponse,
- AsyncProjectsResourceWithStreamingResponse,
-)
-from .rate_limits import (
- RateLimitsResource,
- AsyncRateLimitsResource,
- RateLimitsResourceWithRawResponse,
- AsyncRateLimitsResourceWithRawResponse,
- RateLimitsResourceWithStreamingResponse,
- AsyncRateLimitsResourceWithStreamingResponse,
-)
-from .service_accounts import (
- ServiceAccountsResource,
- AsyncServiceAccountsResource,
- ServiceAccountsResourceWithRawResponse,
- AsyncServiceAccountsResourceWithRawResponse,
- ServiceAccountsResourceWithStreamingResponse,
- AsyncServiceAccountsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "APIKeysResource",
- "AsyncAPIKeysResource",
- "APIKeysResourceWithRawResponse",
- "AsyncAPIKeysResourceWithRawResponse",
- "APIKeysResourceWithStreamingResponse",
- "AsyncAPIKeysResourceWithStreamingResponse",
- "RateLimitsResource",
- "AsyncRateLimitsResource",
- "RateLimitsResourceWithRawResponse",
- "AsyncRateLimitsResourceWithRawResponse",
- "RateLimitsResourceWithStreamingResponse",
- "AsyncRateLimitsResourceWithStreamingResponse",
- "ServiceAccountsResource",
- "AsyncServiceAccountsResource",
- "ServiceAccountsResourceWithRawResponse",
- "AsyncServiceAccountsResourceWithRawResponse",
- "ServiceAccountsResourceWithStreamingResponse",
- "AsyncServiceAccountsResourceWithStreamingResponse",
- "UsersResource",
- "AsyncUsersResource",
- "UsersResourceWithRawResponse",
- "AsyncUsersResourceWithRawResponse",
- "UsersResourceWithStreamingResponse",
- "AsyncUsersResourceWithStreamingResponse",
- "ProjectsResource",
- "AsyncProjectsResource",
- "ProjectsResourceWithRawResponse",
- "AsyncProjectsResourceWithRawResponse",
- "ProjectsResourceWithStreamingResponse",
- "AsyncProjectsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py b/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py
deleted file mode 100644
index c5907765..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.organization.projects import api_key_list_params
-from ....types.organization.projects.api_key import APIKey
-from ....types.organization.projects.api_key_list_response import APIKeyListResponse
-from ....types.organization.projects.api_key_delete_response import APIKeyDeleteResponse
-
-__all__ = ["APIKeysResource", "AsyncAPIKeysResource"]
-
-
-class APIKeysResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> APIKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return APIKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return APIKeysResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- key_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKey:
- """
- Retrieves an API key in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=APIKey,
- )
-
- def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKeyListResponse:
- """
- Returns a list of API keys in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/api_keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- api_key_list_params.APIKeyListParams,
- ),
- ),
- cast_to=APIKeyListResponse,
- )
-
- def delete(
- self,
- key_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKeyDeleteResponse:
- """
- Deletes an API key from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return self._delete(
- f"/organization/projects/{project_id}/api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=APIKeyDeleteResponse,
- )
-
-
-class AsyncAPIKeysResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAPIKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncAPIKeysResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- key_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKey:
- """
- Retrieves an API key in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=APIKey,
- )
-
- async def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKeyListResponse:
- """
- Returns a list of API keys in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/api_keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- api_key_list_params.APIKeyListParams,
- ),
- ),
- cast_to=APIKeyListResponse,
- )
-
- async def delete(
- self,
- key_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKeyDeleteResponse:
- """
- Deletes an API key from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return await self._delete(
- f"/organization/projects/{project_id}/api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=APIKeyDeleteResponse,
- )
-
-
-class APIKeysResourceWithRawResponse:
- def __init__(self, api_keys: APIKeysResource) -> None:
- self._api_keys = api_keys
-
- self.retrieve = to_raw_response_wrapper(
- api_keys.retrieve,
- )
- self.list = to_raw_response_wrapper(
- api_keys.list,
- )
- self.delete = to_raw_response_wrapper(
- api_keys.delete,
- )
-
-
-class AsyncAPIKeysResourceWithRawResponse:
- def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
- self._api_keys = api_keys
-
- self.retrieve = async_to_raw_response_wrapper(
- api_keys.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- api_keys.list,
- )
- self.delete = async_to_raw_response_wrapper(
- api_keys.delete,
- )
-
-
-class APIKeysResourceWithStreamingResponse:
- def __init__(self, api_keys: APIKeysResource) -> None:
- self._api_keys = api_keys
-
- self.retrieve = to_streamed_response_wrapper(
- api_keys.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- api_keys.list,
- )
- self.delete = to_streamed_response_wrapper(
- api_keys.delete,
- )
-
-
-class AsyncAPIKeysResourceWithStreamingResponse:
- def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
- self._api_keys = api_keys
-
- self.retrieve = async_to_streamed_response_wrapper(
- api_keys.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- api_keys.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- api_keys.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py b/src/digitalocean_genai_sdk/resources/organization/projects/projects.py
deleted file mode 100644
index 93e42de8..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py
+++ /dev/null
@@ -1,670 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from .users import (
- UsersResource,
- AsyncUsersResource,
- UsersResourceWithRawResponse,
- AsyncUsersResourceWithRawResponse,
- UsersResourceWithStreamingResponse,
- AsyncUsersResourceWithStreamingResponse,
-)
-from .api_keys import (
- APIKeysResource,
- AsyncAPIKeysResource,
- APIKeysResourceWithRawResponse,
- AsyncAPIKeysResourceWithRawResponse,
- APIKeysResourceWithStreamingResponse,
- AsyncAPIKeysResourceWithStreamingResponse,
-)
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from .rate_limits import (
- RateLimitsResource,
- AsyncRateLimitsResource,
- RateLimitsResourceWithRawResponse,
- AsyncRateLimitsResourceWithRawResponse,
- RateLimitsResourceWithStreamingResponse,
- AsyncRateLimitsResourceWithStreamingResponse,
-)
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from .service_accounts import (
- ServiceAccountsResource,
- AsyncServiceAccountsResource,
- ServiceAccountsResourceWithRawResponse,
- AsyncServiceAccountsResourceWithRawResponse,
- ServiceAccountsResourceWithStreamingResponse,
- AsyncServiceAccountsResourceWithStreamingResponse,
-)
-from ....types.organization import project_list_params, project_create_params, project_update_params
-from ....types.organization.project import Project
-from ....types.organization.project_list_response import ProjectListResponse
-
-__all__ = ["ProjectsResource", "AsyncProjectsResource"]
-
-
-class ProjectsResource(SyncAPIResource):
- @cached_property
- def api_keys(self) -> APIKeysResource:
- return APIKeysResource(self._client)
-
- @cached_property
- def rate_limits(self) -> RateLimitsResource:
- return RateLimitsResource(self._client)
-
- @cached_property
- def service_accounts(self) -> ServiceAccountsResource:
- return ServiceAccountsResource(self._client)
-
- @cached_property
- def users(self) -> UsersResource:
- return UsersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ProjectsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ProjectsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ProjectsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ProjectsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """Create a new project in the organization.
-
- Projects can be created and archived,
- but cannot be deleted.
-
- Args:
- name: The friendly name of the project, this name appears in reports.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/organization/projects",
- body=maybe_transform({"name": name}, project_create_params.ProjectCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- def retrieve(
- self,
- project_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """
- Retrieves a project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- def update(
- self,
- project_id: str,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """
- Modifies a project in the organization.
-
- Args:
- name: The updated name of the project, this name appears in reports.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._post(
- f"/organization/projects/{project_id}",
- body=maybe_transform({"name": name}, project_update_params.ProjectUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- include_archived: bool | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectListResponse:
- """Returns a list of projects.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- include_archived: If `true` returns all projects including those that have been `archived`.
- Archived projects are not included by default.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/projects",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "include_archived": include_archived,
- "limit": limit,
- },
- project_list_params.ProjectListParams,
- ),
- ),
- cast_to=ProjectListResponse,
- )
-
- def archive(
- self,
- project_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """Archives a project in the organization.
-
- Archived projects cannot be used or
- updated.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/archive",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
-
-class AsyncProjectsResource(AsyncAPIResource):
- @cached_property
- def api_keys(self) -> AsyncAPIKeysResource:
- return AsyncAPIKeysResource(self._client)
-
- @cached_property
- def rate_limits(self) -> AsyncRateLimitsResource:
- return AsyncRateLimitsResource(self._client)
-
- @cached_property
- def service_accounts(self) -> AsyncServiceAccountsResource:
- return AsyncServiceAccountsResource(self._client)
-
- @cached_property
- def users(self) -> AsyncUsersResource:
- return AsyncUsersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncProjectsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncProjectsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncProjectsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncProjectsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """Create a new project in the organization.
-
- Projects can be created and archived,
- but cannot be deleted.
-
- Args:
- name: The friendly name of the project, this name appears in reports.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/organization/projects",
- body=await async_maybe_transform({"name": name}, project_create_params.ProjectCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- async def retrieve(
- self,
- project_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """
- Retrieves a project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- async def update(
- self,
- project_id: str,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """
- Modifies a project in the organization.
-
- Args:
- name: The updated name of the project, this name appears in reports.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}",
- body=await async_maybe_transform({"name": name}, project_update_params.ProjectUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- include_archived: bool | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectListResponse:
- """Returns a list of projects.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- include_archived: If `true` returns all projects including those that have been `archived`.
- Archived projects are not included by default.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/projects",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "include_archived": include_archived,
- "limit": limit,
- },
- project_list_params.ProjectListParams,
- ),
- ),
- cast_to=ProjectListResponse,
- )
-
- async def archive(
- self,
- project_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """Archives a project in the organization.
-
- Archived projects cannot be used or
- updated.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/archive",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
-
-class ProjectsResourceWithRawResponse:
- def __init__(self, projects: ProjectsResource) -> None:
- self._projects = projects
-
- self.create = to_raw_response_wrapper(
- projects.create,
- )
- self.retrieve = to_raw_response_wrapper(
- projects.retrieve,
- )
- self.update = to_raw_response_wrapper(
- projects.update,
- )
- self.list = to_raw_response_wrapper(
- projects.list,
- )
- self.archive = to_raw_response_wrapper(
- projects.archive,
- )
-
- @cached_property
- def api_keys(self) -> APIKeysResourceWithRawResponse:
- return APIKeysResourceWithRawResponse(self._projects.api_keys)
-
- @cached_property
- def rate_limits(self) -> RateLimitsResourceWithRawResponse:
- return RateLimitsResourceWithRawResponse(self._projects.rate_limits)
-
- @cached_property
- def service_accounts(self) -> ServiceAccountsResourceWithRawResponse:
- return ServiceAccountsResourceWithRawResponse(self._projects.service_accounts)
-
- @cached_property
- def users(self) -> UsersResourceWithRawResponse:
- return UsersResourceWithRawResponse(self._projects.users)
-
-
-class AsyncProjectsResourceWithRawResponse:
- def __init__(self, projects: AsyncProjectsResource) -> None:
- self._projects = projects
-
- self.create = async_to_raw_response_wrapper(
- projects.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- projects.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- projects.update,
- )
- self.list = async_to_raw_response_wrapper(
- projects.list,
- )
- self.archive = async_to_raw_response_wrapper(
- projects.archive,
- )
-
- @cached_property
- def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse:
- return AsyncAPIKeysResourceWithRawResponse(self._projects.api_keys)
-
- @cached_property
- def rate_limits(self) -> AsyncRateLimitsResourceWithRawResponse:
- return AsyncRateLimitsResourceWithRawResponse(self._projects.rate_limits)
-
- @cached_property
- def service_accounts(self) -> AsyncServiceAccountsResourceWithRawResponse:
- return AsyncServiceAccountsResourceWithRawResponse(self._projects.service_accounts)
-
- @cached_property
- def users(self) -> AsyncUsersResourceWithRawResponse:
- return AsyncUsersResourceWithRawResponse(self._projects.users)
-
-
-class ProjectsResourceWithStreamingResponse:
- def __init__(self, projects: ProjectsResource) -> None:
- self._projects = projects
-
- self.create = to_streamed_response_wrapper(
- projects.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- projects.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- projects.update,
- )
- self.list = to_streamed_response_wrapper(
- projects.list,
- )
- self.archive = to_streamed_response_wrapper(
- projects.archive,
- )
-
- @cached_property
- def api_keys(self) -> APIKeysResourceWithStreamingResponse:
- return APIKeysResourceWithStreamingResponse(self._projects.api_keys)
-
- @cached_property
- def rate_limits(self) -> RateLimitsResourceWithStreamingResponse:
- return RateLimitsResourceWithStreamingResponse(self._projects.rate_limits)
-
- @cached_property
- def service_accounts(self) -> ServiceAccountsResourceWithStreamingResponse:
- return ServiceAccountsResourceWithStreamingResponse(self._projects.service_accounts)
-
- @cached_property
- def users(self) -> UsersResourceWithStreamingResponse:
- return UsersResourceWithStreamingResponse(self._projects.users)
-
-
-class AsyncProjectsResourceWithStreamingResponse:
- def __init__(self, projects: AsyncProjectsResource) -> None:
- self._projects = projects
-
- self.create = async_to_streamed_response_wrapper(
- projects.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- projects.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- projects.update,
- )
- self.list = async_to_streamed_response_wrapper(
- projects.list,
- )
- self.archive = async_to_streamed_response_wrapper(
- projects.archive,
- )
-
- @cached_property
- def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse:
- return AsyncAPIKeysResourceWithStreamingResponse(self._projects.api_keys)
-
- @cached_property
- def rate_limits(self) -> AsyncRateLimitsResourceWithStreamingResponse:
- return AsyncRateLimitsResourceWithStreamingResponse(self._projects.rate_limits)
-
- @cached_property
- def service_accounts(self) -> AsyncServiceAccountsResourceWithStreamingResponse:
- return AsyncServiceAccountsResourceWithStreamingResponse(self._projects.service_accounts)
-
- @cached_property
- def users(self) -> AsyncUsersResourceWithStreamingResponse:
- return AsyncUsersResourceWithStreamingResponse(self._projects.users)
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py b/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py
deleted file mode 100644
index 9c9dce7b..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py
+++ /dev/null
@@ -1,360 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.organization.projects import rate_limit_list_params, rate_limit_update_params
-from ....types.organization.projects.rate_limit import RateLimit
-from ....types.organization.projects.rate_limit_list_response import RateLimitListResponse
-
-__all__ = ["RateLimitsResource", "AsyncRateLimitsResource"]
-
-
-class RateLimitsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> RateLimitsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return RateLimitsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> RateLimitsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return RateLimitsResourceWithStreamingResponse(self)
-
- def update(
- self,
- rate_limit_id: str,
- *,
- project_id: str,
- batch_1_day_max_input_tokens: int | NotGiven = NOT_GIVEN,
- max_audio_megabytes_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_images_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_requests_per_1_day: int | NotGiven = NOT_GIVEN,
- max_requests_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_tokens_per_1_minute: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RateLimit:
- """
- Updates a project rate limit.
-
- Args:
- batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models.
-
- max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models.
-
- max_images_per_1_minute: The maximum images per minute. Only relevant for certain models.
-
- max_requests_per_1_day: The maximum requests per day. Only relevant for certain models.
-
- max_requests_per_1_minute: The maximum requests per minute.
-
- max_tokens_per_1_minute: The maximum tokens per minute.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not rate_limit_id:
- raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/rate_limits/{rate_limit_id}",
- body=maybe_transform(
- {
- "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens,
- "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute,
- "max_images_per_1_minute": max_images_per_1_minute,
- "max_requests_per_1_day": max_requests_per_1_day,
- "max_requests_per_1_minute": max_requests_per_1_minute,
- "max_tokens_per_1_minute": max_tokens_per_1_minute,
- },
- rate_limit_update_params.RateLimitUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RateLimit,
- )
-
- def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RateLimitListResponse:
- """
- Returns the rate limits per model for a project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- beginning with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. The default is 100.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/rate_limits",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- },
- rate_limit_list_params.RateLimitListParams,
- ),
- ),
- cast_to=RateLimitListResponse,
- )
-
-
-class AsyncRateLimitsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncRateLimitsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncRateLimitsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncRateLimitsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncRateLimitsResourceWithStreamingResponse(self)
-
- async def update(
- self,
- rate_limit_id: str,
- *,
- project_id: str,
- batch_1_day_max_input_tokens: int | NotGiven = NOT_GIVEN,
- max_audio_megabytes_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_images_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_requests_per_1_day: int | NotGiven = NOT_GIVEN,
- max_requests_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_tokens_per_1_minute: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RateLimit:
- """
- Updates a project rate limit.
-
- Args:
- batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models.
-
- max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models.
-
- max_images_per_1_minute: The maximum images per minute. Only relevant for certain models.
-
- max_requests_per_1_day: The maximum requests per day. Only relevant for certain models.
-
- max_requests_per_1_minute: The maximum requests per minute.
-
- max_tokens_per_1_minute: The maximum tokens per minute.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not rate_limit_id:
- raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/rate_limits/{rate_limit_id}",
- body=await async_maybe_transform(
- {
- "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens,
- "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute,
- "max_images_per_1_minute": max_images_per_1_minute,
- "max_requests_per_1_day": max_requests_per_1_day,
- "max_requests_per_1_minute": max_requests_per_1_minute,
- "max_tokens_per_1_minute": max_tokens_per_1_minute,
- },
- rate_limit_update_params.RateLimitUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RateLimit,
- )
-
- async def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RateLimitListResponse:
- """
- Returns the rate limits per model for a project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- beginning with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. The default is 100.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/rate_limits",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- },
- rate_limit_list_params.RateLimitListParams,
- ),
- ),
- cast_to=RateLimitListResponse,
- )
-
-
-class RateLimitsResourceWithRawResponse:
- def __init__(self, rate_limits: RateLimitsResource) -> None:
- self._rate_limits = rate_limits
-
- self.update = to_raw_response_wrapper(
- rate_limits.update,
- )
- self.list = to_raw_response_wrapper(
- rate_limits.list,
- )
-
-
-class AsyncRateLimitsResourceWithRawResponse:
- def __init__(self, rate_limits: AsyncRateLimitsResource) -> None:
- self._rate_limits = rate_limits
-
- self.update = async_to_raw_response_wrapper(
- rate_limits.update,
- )
- self.list = async_to_raw_response_wrapper(
- rate_limits.list,
- )
-
-
-class RateLimitsResourceWithStreamingResponse:
- def __init__(self, rate_limits: RateLimitsResource) -> None:
- self._rate_limits = rate_limits
-
- self.update = to_streamed_response_wrapper(
- rate_limits.update,
- )
- self.list = to_streamed_response_wrapper(
- rate_limits.list,
- )
-
-
-class AsyncRateLimitsResourceWithStreamingResponse:
- def __init__(self, rate_limits: AsyncRateLimitsResource) -> None:
- self._rate_limits = rate_limits
-
- self.update = async_to_streamed_response_wrapper(
- rate_limits.update,
- )
- self.list = async_to_streamed_response_wrapper(
- rate_limits.list,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py b/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py
deleted file mode 100644
index 8957a81d..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py
+++ /dev/null
@@ -1,466 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.organization.projects import service_account_list_params, service_account_create_params
-from ....types.organization.projects.service_account import ServiceAccount
-from ....types.organization.projects.service_account_list_response import ServiceAccountListResponse
-from ....types.organization.projects.service_account_create_response import ServiceAccountCreateResponse
-from ....types.organization.projects.service_account_delete_response import ServiceAccountDeleteResponse
-
-__all__ = ["ServiceAccountsResource", "AsyncServiceAccountsResource"]
-
-
-class ServiceAccountsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ServiceAccountsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ServiceAccountsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ServiceAccountsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ServiceAccountsResourceWithStreamingResponse(self)
-
- def create(
- self,
- project_id: str,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountCreateResponse:
- """Creates a new service account in the project.
-
- This also returns an unredacted
- API key for the service account.
-
- Args:
- name: The name of the service account being created.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/service_accounts",
- body=maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccountCreateResponse,
- )
-
- def retrieve(
- self,
- service_account_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccount:
- """
- Retrieves a service account in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not service_account_id:
- raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/service_accounts/{service_account_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccount,
- )
-
- def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountListResponse:
- """
- Returns a list of service accounts in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/service_accounts",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- service_account_list_params.ServiceAccountListParams,
- ),
- ),
- cast_to=ServiceAccountListResponse,
- )
-
- def delete(
- self,
- service_account_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountDeleteResponse:
- """
- Deletes a service account from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not service_account_id:
- raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
- return self._delete(
- f"/organization/projects/{project_id}/service_accounts/{service_account_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccountDeleteResponse,
- )
-
-
-class AsyncServiceAccountsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncServiceAccountsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncServiceAccountsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncServiceAccountsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncServiceAccountsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- project_id: str,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountCreateResponse:
- """Creates a new service account in the project.
-
- This also returns an unredacted
- API key for the service account.
-
- Args:
- name: The name of the service account being created.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/service_accounts",
- body=await async_maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccountCreateResponse,
- )
-
- async def retrieve(
- self,
- service_account_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccount:
- """
- Retrieves a service account in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not service_account_id:
- raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/service_accounts/{service_account_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccount,
- )
-
- async def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountListResponse:
- """
- Returns a list of service accounts in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/service_accounts",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- service_account_list_params.ServiceAccountListParams,
- ),
- ),
- cast_to=ServiceAccountListResponse,
- )
-
- async def delete(
- self,
- service_account_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountDeleteResponse:
- """
- Deletes a service account from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not service_account_id:
- raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
- return await self._delete(
- f"/organization/projects/{project_id}/service_accounts/{service_account_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccountDeleteResponse,
- )
-
-
-class ServiceAccountsResourceWithRawResponse:
- def __init__(self, service_accounts: ServiceAccountsResource) -> None:
- self._service_accounts = service_accounts
-
- self.create = to_raw_response_wrapper(
- service_accounts.create,
- )
- self.retrieve = to_raw_response_wrapper(
- service_accounts.retrieve,
- )
- self.list = to_raw_response_wrapper(
- service_accounts.list,
- )
- self.delete = to_raw_response_wrapper(
- service_accounts.delete,
- )
-
-
-class AsyncServiceAccountsResourceWithRawResponse:
- def __init__(self, service_accounts: AsyncServiceAccountsResource) -> None:
- self._service_accounts = service_accounts
-
- self.create = async_to_raw_response_wrapper(
- service_accounts.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- service_accounts.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- service_accounts.list,
- )
- self.delete = async_to_raw_response_wrapper(
- service_accounts.delete,
- )
-
-
-class ServiceAccountsResourceWithStreamingResponse:
- def __init__(self, service_accounts: ServiceAccountsResource) -> None:
- self._service_accounts = service_accounts
-
- self.create = to_streamed_response_wrapper(
- service_accounts.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- service_accounts.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- service_accounts.list,
- )
- self.delete = to_streamed_response_wrapper(
- service_accounts.delete,
- )
-
-
-class AsyncServiceAccountsResourceWithStreamingResponse:
- def __init__(self, service_accounts: AsyncServiceAccountsResource) -> None:
- self._service_accounts = service_accounts
-
- self.create = async_to_streamed_response_wrapper(
- service_accounts.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- service_accounts.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- service_accounts.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- service_accounts.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/users.py b/src/digitalocean_genai_sdk/resources/organization/projects/users.py
deleted file mode 100644
index e35ff0cf..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/users.py
+++ /dev/null
@@ -1,577 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.organization.projects import user_add_params, user_list_params, user_update_params
-from ....types.organization.projects.project_user import ProjectUser
-from ....types.organization.projects.user_list_response import UserListResponse
-from ....types.organization.projects.user_delete_response import UserDeleteResponse
-
-__all__ = ["UsersResource", "AsyncUsersResource"]
-
-
-class UsersResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> UsersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return UsersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> UsersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return UsersResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- user_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """
- Retrieves a user in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
- def update(
- self,
- user_id: str,
- *,
- project_id: str,
- role: Literal["owner", "member"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """
- Modifies a user's role in the project.
-
- Args:
- role: `owner` or `member`
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/users/{user_id}",
- body=maybe_transform({"role": role}, user_update_params.UserUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
- def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserListResponse:
- """
- Returns a list of users in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/users",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- user_list_params.UserListParams,
- ),
- ),
- cast_to=UserListResponse,
- )
-
- def delete(
- self,
- user_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserDeleteResponse:
- """
- Deletes a user from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._delete(
- f"/organization/projects/{project_id}/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UserDeleteResponse,
- )
-
- def add(
- self,
- project_id: str,
- *,
- role: Literal["owner", "member"],
- user_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """Adds a user to the project.
-
- Users must already be members of the organization to
- be added to a project.
-
- Args:
- role: `owner` or `member`
-
- user_id: The ID of the user.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/users",
- body=maybe_transform(
- {
- "role": role,
- "user_id": user_id,
- },
- user_add_params.UserAddParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
-
-class AsyncUsersResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncUsersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncUsersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncUsersResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- user_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """
- Retrieves a user in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
- async def update(
- self,
- user_id: str,
- *,
- project_id: str,
- role: Literal["owner", "member"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """
- Modifies a user's role in the project.
-
- Args:
- role: `owner` or `member`
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/users/{user_id}",
- body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
- async def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserListResponse:
- """
- Returns a list of users in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/users",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- user_list_params.UserListParams,
- ),
- ),
- cast_to=UserListResponse,
- )
-
- async def delete(
- self,
- user_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserDeleteResponse:
- """
- Deletes a user from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._delete(
- f"/organization/projects/{project_id}/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UserDeleteResponse,
- )
-
- async def add(
- self,
- project_id: str,
- *,
- role: Literal["owner", "member"],
- user_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """Adds a user to the project.
-
- Users must already be members of the organization to
- be added to a project.
-
- Args:
- role: `owner` or `member`
-
- user_id: The ID of the user.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/users",
- body=await async_maybe_transform(
- {
- "role": role,
- "user_id": user_id,
- },
- user_add_params.UserAddParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
-
-class UsersResourceWithRawResponse:
- def __init__(self, users: UsersResource) -> None:
- self._users = users
-
- self.retrieve = to_raw_response_wrapper(
- users.retrieve,
- )
- self.update = to_raw_response_wrapper(
- users.update,
- )
- self.list = to_raw_response_wrapper(
- users.list,
- )
- self.delete = to_raw_response_wrapper(
- users.delete,
- )
- self.add = to_raw_response_wrapper(
- users.add,
- )
-
-
-class AsyncUsersResourceWithRawResponse:
- def __init__(self, users: AsyncUsersResource) -> None:
- self._users = users
-
- self.retrieve = async_to_raw_response_wrapper(
- users.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- users.update,
- )
- self.list = async_to_raw_response_wrapper(
- users.list,
- )
- self.delete = async_to_raw_response_wrapper(
- users.delete,
- )
- self.add = async_to_raw_response_wrapper(
- users.add,
- )
-
-
-class UsersResourceWithStreamingResponse:
- def __init__(self, users: UsersResource) -> None:
- self._users = users
-
- self.retrieve = to_streamed_response_wrapper(
- users.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- users.update,
- )
- self.list = to_streamed_response_wrapper(
- users.list,
- )
- self.delete = to_streamed_response_wrapper(
- users.delete,
- )
- self.add = to_streamed_response_wrapper(
- users.add,
- )
-
-
-class AsyncUsersResourceWithStreamingResponse:
- def __init__(self, users: AsyncUsersResource) -> None:
- self._users = users
-
- self.retrieve = async_to_streamed_response_wrapper(
- users.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- users.update,
- )
- self.list = async_to_streamed_response_wrapper(
- users.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- users.delete,
- )
- self.add = async_to_streamed_response_wrapper(
- users.add,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/usage.py b/src/digitalocean_genai_sdk/resources/organization/usage.py
deleted file mode 100644
index 37d11956..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/usage.py
+++ /dev/null
@@ -1,1543 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.organization import (
- usage_images_params,
- usage_embeddings_params,
- usage_completions_params,
- usage_moderations_params,
- usage_vector_stores_params,
- usage_audio_speeches_params,
- usage_audio_transcriptions_params,
- usage_code_interpreter_sessions_params,
-)
-from ...types.usage_response import UsageResponse
-
-__all__ = ["UsageResource", "AsyncUsageResource"]
-
-
-class UsageResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> UsageResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return UsageResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> UsageResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return UsageResourceWithStreamingResponse(self)
-
- def audio_speeches(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get audio speeches usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/audio_speeches",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_audio_speeches_params.UsageAudioSpeechesParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def audio_transcriptions(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get audio transcriptions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/audio_transcriptions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_audio_transcriptions_params.UsageAudioTranscriptionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def code_interpreter_sessions(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get code interpreter sessions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/code_interpreter_sessions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def completions(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- batch: bool | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get completions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By
- default, return both.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of
- them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/completions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "batch": batch,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_completions_params.UsageCompletionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def embeddings(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get embeddings usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/embeddings",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_embeddings_params.UsageEmbeddingsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def images(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]]
- | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | NotGiven = NOT_GIVEN,
- sources: List[Literal["image.generation", "image.edit", "image.variation"]] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get images usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any
- combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- sizes: Return only usages for these image sizes. Possible values are `256x256`,
- `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them.
-
- sources: Return only usages for these sources. Possible values are `image.generation`,
- `image.edit`, `image.variation` or any combination of them.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/images",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "sizes": sizes,
- "sources": sources,
- "user_ids": user_ids,
- },
- usage_images_params.UsageImagesParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def moderations(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get moderations usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/moderations",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_moderations_params.UsageModerationsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def vector_stores(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get vector stores usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/vector_stores",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- usage_vector_stores_params.UsageVectorStoresParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
-
-class AsyncUsageResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncUsageResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncUsageResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncUsageResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncUsageResourceWithStreamingResponse(self)
-
- async def audio_speeches(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get audio speeches usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/audio_speeches",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_audio_speeches_params.UsageAudioSpeechesParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def audio_transcriptions(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get audio transcriptions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/audio_transcriptions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_audio_transcriptions_params.UsageAudioTranscriptionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def code_interpreter_sessions(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get code interpreter sessions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/code_interpreter_sessions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def completions(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- batch: bool | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get completions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By
- default, return both.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of
- them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/completions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "batch": batch,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_completions_params.UsageCompletionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def embeddings(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get embeddings usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/embeddings",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_embeddings_params.UsageEmbeddingsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def images(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]]
- | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | NotGiven = NOT_GIVEN,
- sources: List[Literal["image.generation", "image.edit", "image.variation"]] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get images usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any
- combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- sizes: Return only usages for these image sizes. Possible values are `256x256`,
- `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them.
-
- sources: Return only usages for these sources. Possible values are `image.generation`,
- `image.edit`, `image.variation` or any combination of them.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/images",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "sizes": sizes,
- "sources": sources,
- "user_ids": user_ids,
- },
- usage_images_params.UsageImagesParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def moderations(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get moderations usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/moderations",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_moderations_params.UsageModerationsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def vector_stores(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get vector stores usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/vector_stores",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- usage_vector_stores_params.UsageVectorStoresParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
-
-class UsageResourceWithRawResponse:
- def __init__(self, usage: UsageResource) -> None:
- self._usage = usage
-
- self.audio_speeches = to_raw_response_wrapper(
- usage.audio_speeches,
- )
- self.audio_transcriptions = to_raw_response_wrapper(
- usage.audio_transcriptions,
- )
- self.code_interpreter_sessions = to_raw_response_wrapper(
- usage.code_interpreter_sessions,
- )
- self.completions = to_raw_response_wrapper(
- usage.completions,
- )
- self.embeddings = to_raw_response_wrapper(
- usage.embeddings,
- )
- self.images = to_raw_response_wrapper(
- usage.images,
- )
- self.moderations = to_raw_response_wrapper(
- usage.moderations,
- )
- self.vector_stores = to_raw_response_wrapper(
- usage.vector_stores,
- )
-
-
-class AsyncUsageResourceWithRawResponse:
- def __init__(self, usage: AsyncUsageResource) -> None:
- self._usage = usage
-
- self.audio_speeches = async_to_raw_response_wrapper(
- usage.audio_speeches,
- )
- self.audio_transcriptions = async_to_raw_response_wrapper(
- usage.audio_transcriptions,
- )
- self.code_interpreter_sessions = async_to_raw_response_wrapper(
- usage.code_interpreter_sessions,
- )
- self.completions = async_to_raw_response_wrapper(
- usage.completions,
- )
- self.embeddings = async_to_raw_response_wrapper(
- usage.embeddings,
- )
- self.images = async_to_raw_response_wrapper(
- usage.images,
- )
- self.moderations = async_to_raw_response_wrapper(
- usage.moderations,
- )
- self.vector_stores = async_to_raw_response_wrapper(
- usage.vector_stores,
- )
-
-
-class UsageResourceWithStreamingResponse:
- def __init__(self, usage: UsageResource) -> None:
- self._usage = usage
-
- self.audio_speeches = to_streamed_response_wrapper(
- usage.audio_speeches,
- )
- self.audio_transcriptions = to_streamed_response_wrapper(
- usage.audio_transcriptions,
- )
- self.code_interpreter_sessions = to_streamed_response_wrapper(
- usage.code_interpreter_sessions,
- )
- self.completions = to_streamed_response_wrapper(
- usage.completions,
- )
- self.embeddings = to_streamed_response_wrapper(
- usage.embeddings,
- )
- self.images = to_streamed_response_wrapper(
- usage.images,
- )
- self.moderations = to_streamed_response_wrapper(
- usage.moderations,
- )
- self.vector_stores = to_streamed_response_wrapper(
- usage.vector_stores,
- )
-
-
-class AsyncUsageResourceWithStreamingResponse:
- def __init__(self, usage: AsyncUsageResource) -> None:
- self._usage = usage
-
- self.audio_speeches = async_to_streamed_response_wrapper(
- usage.audio_speeches,
- )
- self.audio_transcriptions = async_to_streamed_response_wrapper(
- usage.audio_transcriptions,
- )
- self.code_interpreter_sessions = async_to_streamed_response_wrapper(
- usage.code_interpreter_sessions,
- )
- self.completions = async_to_streamed_response_wrapper(
- usage.completions,
- )
- self.embeddings = async_to_streamed_response_wrapper(
- usage.embeddings,
- )
- self.images = async_to_streamed_response_wrapper(
- usage.images,
- )
- self.moderations = async_to_streamed_response_wrapper(
- usage.moderations,
- )
- self.vector_stores = async_to_streamed_response_wrapper(
- usage.vector_stores,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/users.py b/src/digitalocean_genai_sdk/resources/organization/users.py
deleted file mode 100644
index 536e4396..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/users.py
+++ /dev/null
@@ -1,454 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.organization import user_list_params, user_update_params
-from ...types.organization.organization_user import OrganizationUser
-from ...types.organization.user_list_response import UserListResponse
-from ...types.organization.user_delete_response import UserDeleteResponse
-
-__all__ = ["UsersResource", "AsyncUsersResource"]
-
-
-class UsersResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> UsersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return UsersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> UsersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return UsersResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- user_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationUser:
- """
- Retrieves a user by their identifier.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._get(
- f"/organization/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OrganizationUser,
- )
-
- def update(
- self,
- user_id: str,
- *,
- role: Literal["owner", "reader"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationUser:
- """
- Modifies a user's role in the organization.
-
- Args:
- role: `owner` or `reader`
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._post(
- f"/organization/users/{user_id}",
- body=maybe_transform({"role": role}, user_update_params.UserUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OrganizationUser,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- emails: List[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserListResponse:
- """
- Lists all of the users in the organization.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- emails: Filter by the email address of users.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/users",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "emails": emails,
- "limit": limit,
- },
- user_list_params.UserListParams,
- ),
- ),
- cast_to=UserListResponse,
- )
-
- def delete(
- self,
- user_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserDeleteResponse:
- """
- Deletes a user from the organization.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._delete(
- f"/organization/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UserDeleteResponse,
- )
-
-
-class AsyncUsersResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncUsersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncUsersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncUsersResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- user_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationUser:
- """
- Retrieves a user by their identifier.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._get(
- f"/organization/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OrganizationUser,
- )
-
- async def update(
- self,
- user_id: str,
- *,
- role: Literal["owner", "reader"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationUser:
- """
- Modifies a user's role in the organization.
-
- Args:
- role: `owner` or `reader`
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._post(
- f"/organization/users/{user_id}",
- body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OrganizationUser,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- emails: List[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserListResponse:
- """
- Lists all of the users in the organization.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- emails: Filter by the email address of users.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/users",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "emails": emails,
- "limit": limit,
- },
- user_list_params.UserListParams,
- ),
- ),
- cast_to=UserListResponse,
- )
-
- async def delete(
- self,
- user_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserDeleteResponse:
- """
- Deletes a user from the organization.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._delete(
- f"/organization/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UserDeleteResponse,
- )
-
-
-class UsersResourceWithRawResponse:
- def __init__(self, users: UsersResource) -> None:
- self._users = users
-
- self.retrieve = to_raw_response_wrapper(
- users.retrieve,
- )
- self.update = to_raw_response_wrapper(
- users.update,
- )
- self.list = to_raw_response_wrapper(
- users.list,
- )
- self.delete = to_raw_response_wrapper(
- users.delete,
- )
-
-
-class AsyncUsersResourceWithRawResponse:
- def __init__(self, users: AsyncUsersResource) -> None:
- self._users = users
-
- self.retrieve = async_to_raw_response_wrapper(
- users.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- users.update,
- )
- self.list = async_to_raw_response_wrapper(
- users.list,
- )
- self.delete = async_to_raw_response_wrapper(
- users.delete,
- )
-
-
-class UsersResourceWithStreamingResponse:
- def __init__(self, users: UsersResource) -> None:
- self._users = users
-
- self.retrieve = to_streamed_response_wrapper(
- users.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- users.update,
- )
- self.list = to_streamed_response_wrapper(
- users.list,
- )
- self.delete = to_streamed_response_wrapper(
- users.delete,
- )
-
-
-class AsyncUsersResourceWithStreamingResponse:
- def __init__(self, users: AsyncUsersResource) -> None:
- self._users = users
-
- self.retrieve = async_to_streamed_response_wrapper(
- users.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- users.update,
- )
- self.list = async_to_streamed_response_wrapper(
- users.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- users.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/realtime.py b/src/digitalocean_genai_sdk/resources/realtime.py
deleted file mode 100644
index 4c70a798..00000000
--- a/src/digitalocean_genai_sdk/resources/realtime.py
+++ /dev/null
@@ -1,574 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import realtime_create_session_params, realtime_create_transcription_session_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.voice_ids_shared_param import VoiceIDsSharedParam
-from ..types.realtime_create_session_response import RealtimeCreateSessionResponse
-from ..types.realtime_create_transcription_session_response import RealtimeCreateTranscriptionSessionResponse
-
-__all__ = ["RealtimeResource", "AsyncRealtimeResource"]
-
-
-class RealtimeResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> RealtimeResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return RealtimeResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> RealtimeResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return RealtimeResourceWithStreamingResponse(self)
-
- def create_session(
- self,
- *,
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- input_audio_noise_reduction: realtime_create_session_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
- input_audio_transcription: realtime_create_session_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
- instructions: str | NotGiven = NOT_GIVEN,
- max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
- modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
- model: Literal[
- "gpt-4o-realtime-preview",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- ]
- | NotGiven = NOT_GIVEN,
- output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: str | NotGiven = NOT_GIVEN,
- tools: Iterable[realtime_create_session_params.Tool] | NotGiven = NOT_GIVEN,
- turn_detection: realtime_create_session_params.TurnDetection | NotGiven = NOT_GIVEN,
- voice: VoiceIDsSharedParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RealtimeCreateSessionResponse:
- """
- Create an ephemeral API token for use in client-side applications with the
- Realtime API. Can be configured with the same session parameters as the
- `session.update` client event.
-
- It responds with a session object, plus a `client_secret` key which contains a
- usable ephemeral API token that can be used to authenticate browser clients for
- the Realtime API.
-
- Args:
- input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
- `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
- (mono), and little-endian byte order.
-
- input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
- off. Noise reduction filters audio added to the input audio buffer before it is
- sent to VAD and the model. Filtering the audio can improve VAD and turn
- detection accuracy (reducing false positives) and model performance by improving
- perception of the input audio.
-
- input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
- `null` to turn off once on. Input audio transcription is not native to the
- model, since the model consumes audio directly. Transcription runs
- asynchronously through
- [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
- and should be treated as guidance of input audio content rather than precisely
- what the model heard. The client can optionally set the language and prompt for
- transcription, these offer additional guidance to the transcription service.
-
- instructions: The default system instructions (i.e. system message) prepended to model calls.
- This field allows the client to guide the model on desired responses. The model
- can be instructed on response content and format, (e.g. "be extremely succinct",
- "act friendly", "here are examples of good responses") and on audio behavior
- (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
- instructions are not guaranteed to be followed by the model, but they provide
- guidance to the model on the desired behavior.
-
- Note that the server sets default instructions which will be used if this field
- is not set and are visible in the `session.created` event at the start of the
- session.
-
- max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
- tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
- `inf` for the maximum available tokens for a given model. Defaults to `inf`.
-
- modalities: The set of modalities the model can respond with. To disable audio, set this to
- ["text"].
-
- model: The Realtime model used for this session.
-
- output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
- For `pcm16`, output audio is sampled at a rate of 24kHz.
-
- temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
- temperature of 0.8 is highly recommended for best performance.
-
- tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
- a function.
-
- tools: Tools (functions) available to the model.
-
- turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
- set to `null` to turn off, in which case the client must manually trigger model
- response. Server VAD means that the model will detect the start and end of
- speech based on audio volume and respond at the end of user speech. Semantic VAD
- is more advanced and uses a turn detection model (in conjuction with VAD) to
- semantically estimate whether the user has finished speaking, then dynamically
- sets a timeout based on this probability. For example, if user audio trails off
- with "uhhm", the model will score a low probability of turn end and wait longer
- for the user to continue speaking. This can be useful for more natural
- conversations, but may have a higher latency.
-
- voice: The voice the model uses to respond. Voice cannot be changed during the session
- once the model has responded with audio at least once. Current voice options are
- `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- `shimmer`, and `verse`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/realtime/sessions",
- body=maybe_transform(
- {
- "input_audio_format": input_audio_format,
- "input_audio_noise_reduction": input_audio_noise_reduction,
- "input_audio_transcription": input_audio_transcription,
- "instructions": instructions,
- "max_response_output_tokens": max_response_output_tokens,
- "modalities": modalities,
- "model": model,
- "output_audio_format": output_audio_format,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "turn_detection": turn_detection,
- "voice": voice,
- },
- realtime_create_session_params.RealtimeCreateSessionParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RealtimeCreateSessionResponse,
- )
-
- def create_transcription_session(
- self,
- *,
- include: List[str] | NotGiven = NOT_GIVEN,
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- input_audio_noise_reduction: realtime_create_transcription_session_params.InputAudioNoiseReduction
- | NotGiven = NOT_GIVEN,
- input_audio_transcription: realtime_create_transcription_session_params.InputAudioTranscription
- | NotGiven = NOT_GIVEN,
- modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
- turn_detection: realtime_create_transcription_session_params.TurnDetection | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RealtimeCreateTranscriptionSessionResponse:
- """
- Create an ephemeral API token for use in client-side applications with the
- Realtime API specifically for realtime transcriptions. Can be configured with
- the same session parameters as the `transcription_session.update` client event.
-
- It responds with a session object, plus a `client_secret` key which contains a
- usable ephemeral API token that can be used to authenticate browser clients for
- the Realtime API.
-
- Args:
- include:
- The set of items to include in the transcription. Current available items are:
-
- - `item.input_audio_transcription.logprobs`
-
- input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
- `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
- (mono), and little-endian byte order.
-
- input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
- off. Noise reduction filters audio added to the input audio buffer before it is
- sent to VAD and the model. Filtering the audio can improve VAD and turn
- detection accuracy (reducing false positives) and model performance by improving
- perception of the input audio.
-
- input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
- language and prompt for transcription, these offer additional guidance to the
- transcription service.
-
- modalities: The set of modalities the model can respond with. To disable audio, set this to
- ["text"].
-
- turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
- set to `null` to turn off, in which case the client must manually trigger model
- response. Server VAD means that the model will detect the start and end of
- speech based on audio volume and respond at the end of user speech. Semantic VAD
- is more advanced and uses a turn detection model (in conjuction with VAD) to
- semantically estimate whether the user has finished speaking, then dynamically
- sets a timeout based on this probability. For example, if user audio trails off
- with "uhhm", the model will score a low probability of turn end and wait longer
- for the user to continue speaking. This can be useful for more natural
- conversations, but may have a higher latency.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/realtime/transcription_sessions",
- body=maybe_transform(
- {
- "include": include,
- "input_audio_format": input_audio_format,
- "input_audio_noise_reduction": input_audio_noise_reduction,
- "input_audio_transcription": input_audio_transcription,
- "modalities": modalities,
- "turn_detection": turn_detection,
- },
- realtime_create_transcription_session_params.RealtimeCreateTranscriptionSessionParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RealtimeCreateTranscriptionSessionResponse,
- )
-
-
-class AsyncRealtimeResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncRealtimeResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncRealtimeResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncRealtimeResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncRealtimeResourceWithStreamingResponse(self)
-
- async def create_session(
- self,
- *,
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- input_audio_noise_reduction: realtime_create_session_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
- input_audio_transcription: realtime_create_session_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
- instructions: str | NotGiven = NOT_GIVEN,
- max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
- modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
- model: Literal[
- "gpt-4o-realtime-preview",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- ]
- | NotGiven = NOT_GIVEN,
- output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: str | NotGiven = NOT_GIVEN,
- tools: Iterable[realtime_create_session_params.Tool] | NotGiven = NOT_GIVEN,
- turn_detection: realtime_create_session_params.TurnDetection | NotGiven = NOT_GIVEN,
- voice: VoiceIDsSharedParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RealtimeCreateSessionResponse:
- """
- Create an ephemeral API token for use in client-side applications with the
- Realtime API. Can be configured with the same session parameters as the
- `session.update` client event.
-
- It responds with a session object, plus a `client_secret` key which contains a
- usable ephemeral API token that can be used to authenticate browser clients for
- the Realtime API.
-
- Args:
- input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
- `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
- (mono), and little-endian byte order.
-
- input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
- off. Noise reduction filters audio added to the input audio buffer before it is
- sent to VAD and the model. Filtering the audio can improve VAD and turn
- detection accuracy (reducing false positives) and model performance by improving
- perception of the input audio.
-
- input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
- `null` to turn off once on. Input audio transcription is not native to the
- model, since the model consumes audio directly. Transcription runs
- asynchronously through
- [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
- and should be treated as guidance of input audio content rather than precisely
- what the model heard. The client can optionally set the language and prompt for
- transcription, these offer additional guidance to the transcription service.
-
- instructions: The default system instructions (i.e. system message) prepended to model calls.
- This field allows the client to guide the model on desired responses. The model
- can be instructed on response content and format, (e.g. "be extremely succinct",
- "act friendly", "here are examples of good responses") and on audio behavior
- (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
- instructions are not guaranteed to be followed by the model, but they provide
- guidance to the model on the desired behavior.
-
- Note that the server sets default instructions which will be used if this field
- is not set and are visible in the `session.created` event at the start of the
- session.
-
- max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
- tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
- `inf` for the maximum available tokens for a given model. Defaults to `inf`.
-
- modalities: The set of modalities the model can respond with. To disable audio, set this to
- ["text"].
-
- model: The Realtime model used for this session.
-
- output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
- For `pcm16`, output audio is sampled at a rate of 24kHz.
-
- temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
- temperature of 0.8 is highly recommended for best performance.
-
- tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
- a function.
-
- tools: Tools (functions) available to the model.
-
- turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
- set to `null` to turn off, in which case the client must manually trigger model
- response. Server VAD means that the model will detect the start and end of
- speech based on audio volume and respond at the end of user speech. Semantic VAD
- is more advanced and uses a turn detection model (in conjuction with VAD) to
- semantically estimate whether the user has finished speaking, then dynamically
- sets a timeout based on this probability. For example, if user audio trails off
- with "uhhm", the model will score a low probability of turn end and wait longer
- for the user to continue speaking. This can be useful for more natural
- conversations, but may have a higher latency.
-
- voice: The voice the model uses to respond. Voice cannot be changed during the session
- once the model has responded with audio at least once. Current voice options are
- `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- `shimmer`, and `verse`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/realtime/sessions",
- body=await async_maybe_transform(
- {
- "input_audio_format": input_audio_format,
- "input_audio_noise_reduction": input_audio_noise_reduction,
- "input_audio_transcription": input_audio_transcription,
- "instructions": instructions,
- "max_response_output_tokens": max_response_output_tokens,
- "modalities": modalities,
- "model": model,
- "output_audio_format": output_audio_format,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "turn_detection": turn_detection,
- "voice": voice,
- },
- realtime_create_session_params.RealtimeCreateSessionParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RealtimeCreateSessionResponse,
- )
-
- async def create_transcription_session(
- self,
- *,
- include: List[str] | NotGiven = NOT_GIVEN,
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- input_audio_noise_reduction: realtime_create_transcription_session_params.InputAudioNoiseReduction
- | NotGiven = NOT_GIVEN,
- input_audio_transcription: realtime_create_transcription_session_params.InputAudioTranscription
- | NotGiven = NOT_GIVEN,
- modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
- turn_detection: realtime_create_transcription_session_params.TurnDetection | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RealtimeCreateTranscriptionSessionResponse:
- """
- Create an ephemeral API token for use in client-side applications with the
- Realtime API specifically for realtime transcriptions. Can be configured with
- the same session parameters as the `transcription_session.update` client event.
-
- It responds with a session object, plus a `client_secret` key which contains a
- usable ephemeral API token that can be used to authenticate browser clients for
- the Realtime API.
-
- Args:
- include:
- The set of items to include in the transcription. Current available items are:
-
- - `item.input_audio_transcription.logprobs`
-
- input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
- `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
- (mono), and little-endian byte order.
-
- input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
- off. Noise reduction filters audio added to the input audio buffer before it is
- sent to VAD and the model. Filtering the audio can improve VAD and turn
- detection accuracy (reducing false positives) and model performance by improving
- perception of the input audio.
-
- input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
- language and prompt for transcription, these offer additional guidance to the
- transcription service.
-
- modalities: The set of modalities the model can respond with. To disable audio, set this to
- ["text"].
-
- turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
- set to `null` to turn off, in which case the client must manually trigger model
- response. Server VAD means that the model will detect the start and end of
- speech based on audio volume and respond at the end of user speech. Semantic VAD
- is more advanced and uses a turn detection model (in conjuction with VAD) to
- semantically estimate whether the user has finished speaking, then dynamically
- sets a timeout based on this probability. For example, if user audio trails off
- with "uhhm", the model will score a low probability of turn end and wait longer
- for the user to continue speaking. This can be useful for more natural
- conversations, but may have a higher latency.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/realtime/transcription_sessions",
- body=await async_maybe_transform(
- {
- "include": include,
- "input_audio_format": input_audio_format,
- "input_audio_noise_reduction": input_audio_noise_reduction,
- "input_audio_transcription": input_audio_transcription,
- "modalities": modalities,
- "turn_detection": turn_detection,
- },
- realtime_create_transcription_session_params.RealtimeCreateTranscriptionSessionParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RealtimeCreateTranscriptionSessionResponse,
- )
-
-
-class RealtimeResourceWithRawResponse:
- def __init__(self, realtime: RealtimeResource) -> None:
- self._realtime = realtime
-
- self.create_session = to_raw_response_wrapper(
- realtime.create_session,
- )
- self.create_transcription_session = to_raw_response_wrapper(
- realtime.create_transcription_session,
- )
-
-
-class AsyncRealtimeResourceWithRawResponse:
- def __init__(self, realtime: AsyncRealtimeResource) -> None:
- self._realtime = realtime
-
- self.create_session = async_to_raw_response_wrapper(
- realtime.create_session,
- )
- self.create_transcription_session = async_to_raw_response_wrapper(
- realtime.create_transcription_session,
- )
-
-
-class RealtimeResourceWithStreamingResponse:
- def __init__(self, realtime: RealtimeResource) -> None:
- self._realtime = realtime
-
- self.create_session = to_streamed_response_wrapper(
- realtime.create_session,
- )
- self.create_transcription_session = to_streamed_response_wrapper(
- realtime.create_transcription_session,
- )
-
-
-class AsyncRealtimeResourceWithStreamingResponse:
- def __init__(self, realtime: AsyncRealtimeResource) -> None:
- self._realtime = realtime
-
- self.create_session = async_to_streamed_response_wrapper(
- realtime.create_session,
- )
- self.create_transcription_session = async_to_streamed_response_wrapper(
- realtime.create_transcription_session,
- )
diff --git a/src/digitalocean_genai_sdk/resources/responses.py b/src/digitalocean_genai_sdk/resources/responses.py
deleted file mode 100644
index 03445cdc..00000000
--- a/src/digitalocean_genai_sdk/resources/responses.py
+++ /dev/null
@@ -1,902 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import response_create_params, response_retrieve_params, response_list_input_items_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.response import Response
-from ..types.includable import Includable
-from ..types.response_list_input_items_response import ResponseListInputItemsResponse
-
-__all__ = ["ResponsesResource", "AsyncResponsesResource"]
-
-
-class ResponsesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ResponsesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ResponsesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ResponsesResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- input: Union[str, Iterable[response_create_params.InputInputItemList]],
- model: Union[
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- "o1-pro",
- "o1-pro-2025-03-19",
- "computer-use-preview",
- "computer-use-preview-2025-03-11",
- ],
- str,
- ],
- include: Optional[List[Includable]] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning: Optional[response_create_params.Reasoning] | NotGiven = NOT_GIVEN,
- store: Optional[bool] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
- tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[response_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """Creates a model response.
-
- Provide [text](/docs/guides/text) or
- [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or
- [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own
- [custom code](/docs/guides/function-calling) or use built-in
- [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search) to use your own data as input for
- the model's response.
-
- Args:
- input: Text, image, or file inputs to the model, used to generate a response.
-
- Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Image inputs](/docs/guides/images)
- - [File inputs](/docs/guides/pdf-files)
- - [Conversation state](/docs/guides/conversation-state)
- - [Function calling](/docs/guides/function-calling)
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
-
- include: Specify additional output data to include in the model response. Currently
- supported values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- instructions: Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will be not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
-
- max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
-
- previous_response_id: The unique ID of the previous response to the model. Use this to create
- multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
-
- reasoning: **o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-
- store: Whether to store the generated model response for later retrieval via API.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/responses-streaming) for
- more information.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic. We generally recommend altering this or `top_p` but
- not both.
-
- text: Configuration options for a text response from the model. Can be plain text or
- structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
-
- tool_choice: How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
-
- tools: An array of tools the model may call while generating a response. You can
- specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- truncation: The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/responses",
- body=maybe_transform(
- {
- "input": input,
- "model": model,
- "include": include,
- "instructions": instructions,
- "max_output_tokens": max_output_tokens,
- "metadata": metadata,
- "parallel_tool_calls": parallel_tool_calls,
- "previous_response_id": previous_response_id,
- "reasoning": reasoning,
- "store": store,
- "stream": stream,
- "temperature": temperature,
- "text": text,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation": truncation,
- "user": user,
- },
- response_create_params.ResponseCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Response,
- )
-
- def retrieve(
- self,
- response_id: str,
- *,
- include: List[Includable] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """
- Retrieves a model response with the given ID.
-
- Args:
- include: Specify additional output data to include in the response. Currently supported
- values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- return self._get(
- f"/responses/{response_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"include": include}, response_retrieve_params.ResponseRetrieveParams),
- ),
- cast_to=Response,
- )
-
- def delete(
- self,
- response_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
- """
- Deletes a model response with the given ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- f"/responses/{response_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- def list_input_items(
- self,
- response_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ResponseListInputItemsResponse:
- """
- Returns a list of input items for a given response.
-
- Args:
- after: An item ID to list items after, used in pagination.
-
- before: An item ID to list items before, used in pagination.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: The order to return the input items in. Default is `asc`.
-
- - `asc`: Return the input items in ascending order.
- - `desc`: Return the input items in descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- return self._get(
- f"/responses/{response_id}/input_items",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- response_list_input_items_params.ResponseListInputItemsParams,
- ),
- ),
- cast_to=ResponseListInputItemsResponse,
- )
-
-
-class AsyncResponsesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncResponsesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncResponsesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- input: Union[str, Iterable[response_create_params.InputInputItemList]],
- model: Union[
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- "o1-pro",
- "o1-pro-2025-03-19",
- "computer-use-preview",
- "computer-use-preview-2025-03-11",
- ],
- str,
- ],
- include: Optional[List[Includable]] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning: Optional[response_create_params.Reasoning] | NotGiven = NOT_GIVEN,
- store: Optional[bool] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
- tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[response_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """Creates a model response.
-
- Provide [text](/docs/guides/text) or
- [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or
- [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own
- [custom code](/docs/guides/function-calling) or use built-in
- [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search) to use your own data as input for
- the model's response.
-
- Args:
- input: Text, image, or file inputs to the model, used to generate a response.
-
- Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Image inputs](/docs/guides/images)
- - [File inputs](/docs/guides/pdf-files)
- - [Conversation state](/docs/guides/conversation-state)
- - [Function calling](/docs/guides/function-calling)
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
-
- include: Specify additional output data to include in the model response. Currently
- supported values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- instructions: Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will be not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
-
- max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
-
- previous_response_id: The unique ID of the previous response to the model. Use this to create
- multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
-
- reasoning: **o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-
- store: Whether to store the generated model response for later retrieval via API.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/responses-streaming) for
- more information.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic. We generally recommend altering this or `top_p` but
- not both.
-
- text: Configuration options for a text response from the model. Can be plain text or
- structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
-
- tool_choice: How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
-
- tools: An array of tools the model may call while generating a response. You can
- specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- truncation: The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/responses",
- body=await async_maybe_transform(
- {
- "input": input,
- "model": model,
- "include": include,
- "instructions": instructions,
- "max_output_tokens": max_output_tokens,
- "metadata": metadata,
- "parallel_tool_calls": parallel_tool_calls,
- "previous_response_id": previous_response_id,
- "reasoning": reasoning,
- "store": store,
- "stream": stream,
- "temperature": temperature,
- "text": text,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation": truncation,
- "user": user,
- },
- response_create_params.ResponseCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Response,
- )
-
- async def retrieve(
- self,
- response_id: str,
- *,
- include: List[Includable] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """
- Retrieves a model response with the given ID.
-
- Args:
- include: Specify additional output data to include in the response. Currently supported
- values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- return await self._get(
- f"/responses/{response_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {"include": include}, response_retrieve_params.ResponseRetrieveParams
- ),
- ),
- cast_to=Response,
- )
-
- async def delete(
- self,
- response_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
- """
- Deletes a model response with the given ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- f"/responses/{response_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- async def list_input_items(
- self,
- response_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ResponseListInputItemsResponse:
- """
- Returns a list of input items for a given response.
-
- Args:
- after: An item ID to list items after, used in pagination.
-
- before: An item ID to list items before, used in pagination.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: The order to return the input items in. Default is `asc`.
-
- - `asc`: Return the input items in ascending order.
- - `desc`: Return the input items in descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- return await self._get(
- f"/responses/{response_id}/input_items",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- response_list_input_items_params.ResponseListInputItemsParams,
- ),
- ),
- cast_to=ResponseListInputItemsResponse,
- )
-
-
-class ResponsesResourceWithRawResponse:
- def __init__(self, responses: ResponsesResource) -> None:
- self._responses = responses
-
- self.create = to_raw_response_wrapper(
- responses.create,
- )
- self.retrieve = to_raw_response_wrapper(
- responses.retrieve,
- )
- self.delete = to_raw_response_wrapper(
- responses.delete,
- )
- self.list_input_items = to_raw_response_wrapper(
- responses.list_input_items,
- )
-
-
-class AsyncResponsesResourceWithRawResponse:
- def __init__(self, responses: AsyncResponsesResource) -> None:
- self._responses = responses
-
- self.create = async_to_raw_response_wrapper(
- responses.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- responses.retrieve,
- )
- self.delete = async_to_raw_response_wrapper(
- responses.delete,
- )
- self.list_input_items = async_to_raw_response_wrapper(
- responses.list_input_items,
- )
-
-
-class ResponsesResourceWithStreamingResponse:
- def __init__(self, responses: ResponsesResource) -> None:
- self._responses = responses
-
- self.create = to_streamed_response_wrapper(
- responses.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- responses.retrieve,
- )
- self.delete = to_streamed_response_wrapper(
- responses.delete,
- )
- self.list_input_items = to_streamed_response_wrapper(
- responses.list_input_items,
- )
-
-
-class AsyncResponsesResourceWithStreamingResponse:
- def __init__(self, responses: AsyncResponsesResource) -> None:
- self._responses = responses
-
- self.create = async_to_streamed_response_wrapper(
- responses.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- responses.retrieve,
- )
- self.delete = async_to_streamed_response_wrapper(
- responses.delete,
- )
- self.list_input_items = async_to_streamed_response_wrapper(
- responses.list_input_items,
- )
diff --git a/src/digitalocean_genai_sdk/resources/threads/__init__.py b/src/digitalocean_genai_sdk/resources/threads/__init__.py
deleted file mode 100644
index 736b9bd6..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .runs import (
- RunsResource,
- AsyncRunsResource,
- RunsResourceWithRawResponse,
- AsyncRunsResourceWithRawResponse,
- RunsResourceWithStreamingResponse,
- AsyncRunsResourceWithStreamingResponse,
-)
-from .threads import (
- ThreadsResource,
- AsyncThreadsResource,
- ThreadsResourceWithRawResponse,
- AsyncThreadsResourceWithRawResponse,
- ThreadsResourceWithStreamingResponse,
- AsyncThreadsResourceWithStreamingResponse,
-)
-from .messages import (
- MessagesResource,
- AsyncMessagesResource,
- MessagesResourceWithRawResponse,
- AsyncMessagesResourceWithRawResponse,
- MessagesResourceWithStreamingResponse,
- AsyncMessagesResourceWithStreamingResponse,
-)
-
-__all__ = [
- "RunsResource",
- "AsyncRunsResource",
- "RunsResourceWithRawResponse",
- "AsyncRunsResourceWithRawResponse",
- "RunsResourceWithStreamingResponse",
- "AsyncRunsResourceWithStreamingResponse",
- "MessagesResource",
- "AsyncMessagesResource",
- "MessagesResourceWithRawResponse",
- "AsyncMessagesResourceWithRawResponse",
- "MessagesResourceWithStreamingResponse",
- "AsyncMessagesResourceWithStreamingResponse",
- "ThreadsResource",
- "AsyncThreadsResource",
- "ThreadsResourceWithRawResponse",
- "AsyncThreadsResourceWithRawResponse",
- "ThreadsResourceWithStreamingResponse",
- "AsyncThreadsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/threads/messages.py b/src/digitalocean_genai_sdk/resources/threads/messages.py
deleted file mode 100644
index e62eb94c..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/messages.py
+++ /dev/null
@@ -1,654 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.threads import message_list_params, message_create_params, message_update_params
-from ...types.threads.message_object import MessageObject
-from ...types.threads.message_list_response import MessageListResponse
-from ...types.threads.message_delete_response import MessageDeleteResponse
-
-__all__ = ["MessagesResource", "AsyncMessagesResource"]
-
-
-class MessagesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> MessagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return MessagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> MessagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return MessagesResourceWithStreamingResponse(self)
-
- def create(
- self,
- thread_id: str,
- *,
- content: Union[str, Iterable[message_create_params.ContentArrayOfContentPart]],
- role: Literal["user", "assistant"],
- attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Create a message.
-
- Args:
- content: The text contents of the message.
-
- role:
- The role of the entity that is creating the message. Allowed values include:
-
- - `user`: Indicates the message is sent by an actual user and should be used in
- most cases to represent user-generated messages.
- - `assistant`: Indicates the message is generated by the assistant. Use this
- value to insert messages from the assistant into the conversation.
-
- attachments: A list of files attached to the message, and the tools they should be added to.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._post(
- f"/threads/{thread_id}/messages",
- body=maybe_transform(
- {
- "content": content,
- "role": role,
- "attachments": attachments,
- "metadata": metadata,
- },
- message_create_params.MessageCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- def retrieve(
- self,
- message_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Retrieve a message.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return self._get(
- f"/threads/{thread_id}/messages/{message_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- def update(
- self,
- message_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Modifies a message.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return self._post(
- f"/threads/{thread_id}/messages/{message_id}",
- body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- def list(
- self,
- thread_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- run_id: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageListResponse:
- """
- Returns a list of messages for a given thread.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- run_id: Filter messages by the run ID that generated them.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._get(
- f"/threads/{thread_id}/messages",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- "run_id": run_id,
- },
- message_list_params.MessageListParams,
- ),
- ),
- cast_to=MessageListResponse,
- )
-
- def delete(
- self,
- message_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageDeleteResponse:
- """
- Deletes a message.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return self._delete(
- f"/threads/{thread_id}/messages/{message_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageDeleteResponse,
- )
-
-
-class AsyncMessagesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncMessagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncMessagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncMessagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncMessagesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- thread_id: str,
- *,
- content: Union[str, Iterable[message_create_params.ContentArrayOfContentPart]],
- role: Literal["user", "assistant"],
- attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Create a message.
-
- Args:
- content: The text contents of the message.
-
- role:
- The role of the entity that is creating the message. Allowed values include:
-
- - `user`: Indicates the message is sent by an actual user and should be used in
- most cases to represent user-generated messages.
- - `assistant`: Indicates the message is generated by the assistant. Use this
- value to insert messages from the assistant into the conversation.
-
- attachments: A list of files attached to the message, and the tools they should be added to.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._post(
- f"/threads/{thread_id}/messages",
- body=await async_maybe_transform(
- {
- "content": content,
- "role": role,
- "attachments": attachments,
- "metadata": metadata,
- },
- message_create_params.MessageCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- async def retrieve(
- self,
- message_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Retrieve a message.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return await self._get(
- f"/threads/{thread_id}/messages/{message_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- async def update(
- self,
- message_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Modifies a message.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return await self._post(
- f"/threads/{thread_id}/messages/{message_id}",
- body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- async def list(
- self,
- thread_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- run_id: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageListResponse:
- """
- Returns a list of messages for a given thread.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- run_id: Filter messages by the run ID that generated them.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._get(
- f"/threads/{thread_id}/messages",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- "run_id": run_id,
- },
- message_list_params.MessageListParams,
- ),
- ),
- cast_to=MessageListResponse,
- )
-
- async def delete(
- self,
- message_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageDeleteResponse:
- """
- Deletes a message.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return await self._delete(
- f"/threads/{thread_id}/messages/{message_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageDeleteResponse,
- )
-
-
-class MessagesResourceWithRawResponse:
- def __init__(self, messages: MessagesResource) -> None:
- self._messages = messages
-
- self.create = to_raw_response_wrapper(
- messages.create,
- )
- self.retrieve = to_raw_response_wrapper(
- messages.retrieve,
- )
- self.update = to_raw_response_wrapper(
- messages.update,
- )
- self.list = to_raw_response_wrapper(
- messages.list,
- )
- self.delete = to_raw_response_wrapper(
- messages.delete,
- )
-
-
-class AsyncMessagesResourceWithRawResponse:
- def __init__(self, messages: AsyncMessagesResource) -> None:
- self._messages = messages
-
- self.create = async_to_raw_response_wrapper(
- messages.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- messages.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- messages.update,
- )
- self.list = async_to_raw_response_wrapper(
- messages.list,
- )
- self.delete = async_to_raw_response_wrapper(
- messages.delete,
- )
-
-
-class MessagesResourceWithStreamingResponse:
- def __init__(self, messages: MessagesResource) -> None:
- self._messages = messages
-
- self.create = to_streamed_response_wrapper(
- messages.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- messages.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- messages.update,
- )
- self.list = to_streamed_response_wrapper(
- messages.list,
- )
- self.delete = to_streamed_response_wrapper(
- messages.delete,
- )
-
-
-class AsyncMessagesResourceWithStreamingResponse:
- def __init__(self, messages: AsyncMessagesResource) -> None:
- self._messages = messages
-
- self.create = async_to_streamed_response_wrapper(
- messages.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- messages.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- messages.update,
- )
- self.list = async_to_streamed_response_wrapper(
- messages.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- messages.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py b/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py
deleted file mode 100644
index 70942400..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .runs import (
- RunsResource,
- AsyncRunsResource,
- RunsResourceWithRawResponse,
- AsyncRunsResourceWithRawResponse,
- RunsResourceWithStreamingResponse,
- AsyncRunsResourceWithStreamingResponse,
-)
-from .steps import (
- StepsResource,
- AsyncStepsResource,
- StepsResourceWithRawResponse,
- AsyncStepsResourceWithRawResponse,
- StepsResourceWithStreamingResponse,
- AsyncStepsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "StepsResource",
- "AsyncStepsResource",
- "StepsResourceWithRawResponse",
- "AsyncStepsResourceWithRawResponse",
- "StepsResourceWithStreamingResponse",
- "AsyncStepsResourceWithStreamingResponse",
- "RunsResource",
- "AsyncRunsResource",
- "RunsResourceWithRawResponse",
- "AsyncRunsResourceWithRawResponse",
- "RunsResourceWithStreamingResponse",
- "AsyncRunsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py b/src/digitalocean_genai_sdk/resources/threads/runs/runs.py
deleted file mode 100644
index a270b7a9..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py
+++ /dev/null
@@ -1,1427 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from .steps import (
- StepsResource,
- AsyncStepsResource,
- StepsResourceWithRawResponse,
- AsyncStepsResourceWithRawResponse,
- StepsResourceWithStreamingResponse,
- AsyncStepsResourceWithStreamingResponse,
-)
-from ....types import ReasoningEffort
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.threads import (
- run_list_params,
- run_create_params,
- run_update_params,
- run_create_run_params,
- run_submit_tool_outputs_params,
-)
-from ....types.reasoning_effort import ReasoningEffort
-from ....types.threads.run_object import RunObject
-from ....types.threads.run_list_response import RunListResponse
-from ....types.assistant_supported_models import AssistantSupportedModels
-from ....types.create_thread_request_param import CreateThreadRequestParam
-from ....types.threads.truncation_object_param import TruncationObjectParam
-from ....types.threads.create_message_request_param import CreateMessageRequestParam
-from ....types.assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-from ....types.threads.assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam
-
-__all__ = ["RunsResource", "AsyncRunsResource"]
-
-
-class RunsResource(SyncAPIResource):
- @cached_property
- def steps(self) -> StepsResource:
- return StepsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> RunsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return RunsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> RunsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return RunsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- assistant_id: str,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[
- str,
- Literal[
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4.5-preview",
- "gpt-4.5-preview-2025-02-27",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- ],
- None,
- ]
- | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- thread: CreateThreadRequestParam | NotGiven = NOT_GIVEN,
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[run_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Create a thread and run it in one request.
-
- Args:
- assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
-
- instructions: Override the default system message of the assistant. This is useful for
- modifying the behavior on a per-run basis.
-
- max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run. If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- thread: Options to create a new thread. If no thread is provided when running a request,
- an empty thread will be created.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: Override the tools the assistant can use for this run. This is useful for
- modifying the behavior on a per-run basis.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the intial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/threads/runs",
- body=maybe_transform(
- {
- "assistant_id": assistant_id,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "thread": thread,
- "tool_choice": tool_choice,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_params.RunCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def retrieve(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Retrieves a run.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs/{run_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def update(
- self,
- run_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Modifies a run.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._post(
- f"/threads/{thread_id}/runs/{run_id}",
- body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def list(
- self,
- thread_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunListResponse:
- """
- Returns a list of runs belonging to a thread.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- run_list_params.RunListParams,
- ),
- ),
- cast_to=RunListResponse,
- )
-
- def cancel(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Cancels a run that is `in_progress`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._post(
- f"/threads/{thread_id}/runs/{run_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def create_run(
- self,
- thread_id: str,
- *,
- assistant_id: str,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
- additional_messages: Optional[Iterable[CreateMessageRequestParam]] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[str, AssistantSupportedModels, None] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_run_params.Tool]] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Create a run.
-
- Args:
- assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
-
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- additional_instructions: Appends additional instructions at the end of the instructions for the run. This
- is useful for modifying the behavior on a per-run basis without overriding other
- instructions.
-
- additional_messages: Adds additional messages to the thread before creating the run.
-
- instructions: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of
- the assistant. This is useful for modifying the behavior on a per-run basis.
-
- max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run. If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- tools: Override the tools the assistant can use for this run. This is useful for
- modifying the behavior on a per-run basis.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the intial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._post(
- f"/threads/{thread_id}/runs",
- body=maybe_transform(
- {
- "assistant_id": assistant_id,
- "additional_instructions": additional_instructions,
- "additional_messages": additional_messages,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_run_params.RunCreateRunParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"include": include}, run_create_run_params.RunCreateRunParams),
- ),
- cast_to=RunObject,
- )
-
- def submit_tool_outputs(
- self,
- run_id: str,
- *,
- thread_id: str,
- tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- When a run has the `status: "requires_action"` and `required_action.type` is
- `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
- tool calls once they're all completed. All outputs must be submitted in a single
- request.
-
- Args:
- tool_outputs: A list of tools for which the outputs are being submitted.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._post(
- f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
- body=maybe_transform(
- {
- "tool_outputs": tool_outputs,
- "stream": stream,
- },
- run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
-
-class AsyncRunsResource(AsyncAPIResource):
- @cached_property
- def steps(self) -> AsyncStepsResource:
- return AsyncStepsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncRunsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncRunsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncRunsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncRunsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- assistant_id: str,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[
- str,
- Literal[
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4.5-preview",
- "gpt-4.5-preview-2025-02-27",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- ],
- None,
- ]
- | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- thread: CreateThreadRequestParam | NotGiven = NOT_GIVEN,
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[run_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Create a thread and run it in one request.
-
- Args:
- assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
-
- instructions: Override the default system message of the assistant. This is useful for
- modifying the behavior on a per-run basis.
-
- max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run. If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- thread: Options to create a new thread. If no thread is provided when running a request,
- an empty thread will be created.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: Override the tools the assistant can use for this run. This is useful for
- modifying the behavior on a per-run basis.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the intial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/threads/runs",
- body=await async_maybe_transform(
- {
- "assistant_id": assistant_id,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "thread": thread,
- "tool_choice": tool_choice,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_params.RunCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- async def retrieve(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Retrieves a run.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._get(
- f"/threads/{thread_id}/runs/{run_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- async def update(
- self,
- run_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Modifies a run.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._post(
- f"/threads/{thread_id}/runs/{run_id}",
- body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- async def list(
- self,
- thread_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunListResponse:
- """
- Returns a list of runs belonging to a thread.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._get(
- f"/threads/{thread_id}/runs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- run_list_params.RunListParams,
- ),
- ),
- cast_to=RunListResponse,
- )
-
- async def cancel(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Cancels a run that is `in_progress`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._post(
- f"/threads/{thread_id}/runs/{run_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- async def create_run(
- self,
- thread_id: str,
- *,
- assistant_id: str,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
- additional_messages: Optional[Iterable[CreateMessageRequestParam]] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[str, AssistantSupportedModels, None] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_run_params.Tool]] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Create a run.
-
- Args:
- assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
-
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- additional_instructions: Appends additional instructions at the end of the instructions for the run. This
- is useful for modifying the behavior on a per-run basis without overriding other
- instructions.
-
- additional_messages: Adds additional messages to the thread before creating the run.
-
- instructions: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of
- the assistant. This is useful for modifying the behavior on a per-run basis.
-
- max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run. If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- tools: Override the tools the assistant can use for this run. This is useful for
- modifying the behavior on a per-run basis.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the intial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._post(
- f"/threads/{thread_id}/runs",
- body=await async_maybe_transform(
- {
- "assistant_id": assistant_id,
- "additional_instructions": additional_instructions,
- "additional_messages": additional_messages,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_run_params.RunCreateRunParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform({"include": include}, run_create_run_params.RunCreateRunParams),
- ),
- cast_to=RunObject,
- )
-
- async def submit_tool_outputs(
- self,
- run_id: str,
- *,
- thread_id: str,
- tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- When a run has the `status: "requires_action"` and `required_action.type` is
- `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
- tool calls once they're all completed. All outputs must be submitted in a single
- request.
-
- Args:
- tool_outputs: A list of tools for which the outputs are being submitted.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._post(
- f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
- body=await async_maybe_transform(
- {
- "tool_outputs": tool_outputs,
- "stream": stream,
- },
- run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
-
-class RunsResourceWithRawResponse:
- def __init__(self, runs: RunsResource) -> None:
- self._runs = runs
-
- self.create = to_raw_response_wrapper(
- runs.create,
- )
- self.retrieve = to_raw_response_wrapper(
- runs.retrieve,
- )
- self.update = to_raw_response_wrapper(
- runs.update,
- )
- self.list = to_raw_response_wrapper(
- runs.list,
- )
- self.cancel = to_raw_response_wrapper(
- runs.cancel,
- )
- self.create_run = to_raw_response_wrapper(
- runs.create_run,
- )
- self.submit_tool_outputs = to_raw_response_wrapper(
- runs.submit_tool_outputs,
- )
-
- @cached_property
- def steps(self) -> StepsResourceWithRawResponse:
- return StepsResourceWithRawResponse(self._runs.steps)
-
-
-class AsyncRunsResourceWithRawResponse:
- def __init__(self, runs: AsyncRunsResource) -> None:
- self._runs = runs
-
- self.create = async_to_raw_response_wrapper(
- runs.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- runs.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- runs.update,
- )
- self.list = async_to_raw_response_wrapper(
- runs.list,
- )
- self.cancel = async_to_raw_response_wrapper(
- runs.cancel,
- )
- self.create_run = async_to_raw_response_wrapper(
- runs.create_run,
- )
- self.submit_tool_outputs = async_to_raw_response_wrapper(
- runs.submit_tool_outputs,
- )
-
- @cached_property
- def steps(self) -> AsyncStepsResourceWithRawResponse:
- return AsyncStepsResourceWithRawResponse(self._runs.steps)
-
-
-class RunsResourceWithStreamingResponse:
- def __init__(self, runs: RunsResource) -> None:
- self._runs = runs
-
- self.create = to_streamed_response_wrapper(
- runs.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- runs.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- runs.update,
- )
- self.list = to_streamed_response_wrapper(
- runs.list,
- )
- self.cancel = to_streamed_response_wrapper(
- runs.cancel,
- )
- self.create_run = to_streamed_response_wrapper(
- runs.create_run,
- )
- self.submit_tool_outputs = to_streamed_response_wrapper(
- runs.submit_tool_outputs,
- )
-
- @cached_property
- def steps(self) -> StepsResourceWithStreamingResponse:
- return StepsResourceWithStreamingResponse(self._runs.steps)
-
-
-class AsyncRunsResourceWithStreamingResponse:
- def __init__(self, runs: AsyncRunsResource) -> None:
- self._runs = runs
-
- self.create = async_to_streamed_response_wrapper(
- runs.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- runs.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- runs.update,
- )
- self.list = async_to_streamed_response_wrapper(
- runs.list,
- )
- self.cancel = async_to_streamed_response_wrapper(
- runs.cancel,
- )
- self.create_run = async_to_streamed_response_wrapper(
- runs.create_run,
- )
- self.submit_tool_outputs = async_to_streamed_response_wrapper(
- runs.submit_tool_outputs,
- )
-
- @cached_property
- def steps(self) -> AsyncStepsResourceWithStreamingResponse:
- return AsyncStepsResourceWithStreamingResponse(self._runs.steps)
diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py b/src/digitalocean_genai_sdk/resources/threads/runs/steps.py
deleted file mode 100644
index 2b5ffd09..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.threads.runs import step_list_params, step_retrieve_params
-from ....types.threads.runs.run_step_object import RunStepObject
-from ....types.threads.runs.step_list_response import StepListResponse
-
-__all__ = ["StepsResource", "AsyncStepsResource"]
-
-
-class StepsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> StepsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return StepsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> StepsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return StepsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- step_id: str,
- *,
- thread_id: str,
- run_id: str,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunStepObject:
- """
- Retrieves a run step.
-
- Args:
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- if not step_id:
- raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
- ),
- cast_to=RunStepObject,
- )
-
- def list(
- self,
- run_id: str,
- *,
- thread_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> StepListResponse:
- """
- Returns a list of run steps belonging to a run.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "include": include,
- "limit": limit,
- "order": order,
- },
- step_list_params.StepListParams,
- ),
- ),
- cast_to=StepListResponse,
- )
-
-
-class AsyncStepsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncStepsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncStepsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncStepsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncStepsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- step_id: str,
- *,
- thread_id: str,
- run_id: str,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunStepObject:
- """
- Retrieves a run step.
-
- Args:
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- if not step_id:
- raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
- return await self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
- ),
- cast_to=RunStepObject,
- )
-
- async def list(
- self,
- run_id: str,
- *,
- thread_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> StepListResponse:
- """
- Returns a list of run steps belonging to a run.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "include": include,
- "limit": limit,
- "order": order,
- },
- step_list_params.StepListParams,
- ),
- ),
- cast_to=StepListResponse,
- )
-
-
-class StepsResourceWithRawResponse:
- def __init__(self, steps: StepsResource) -> None:
- self._steps = steps
-
- self.retrieve = to_raw_response_wrapper(
- steps.retrieve,
- )
- self.list = to_raw_response_wrapper(
- steps.list,
- )
-
-
-class AsyncStepsResourceWithRawResponse:
- def __init__(self, steps: AsyncStepsResource) -> None:
- self._steps = steps
-
- self.retrieve = async_to_raw_response_wrapper(
- steps.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- steps.list,
- )
-
-
-class StepsResourceWithStreamingResponse:
- def __init__(self, steps: StepsResource) -> None:
- self._steps = steps
-
- self.retrieve = to_streamed_response_wrapper(
- steps.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- steps.list,
- )
-
-
-class AsyncStepsResourceWithStreamingResponse:
- def __init__(self, steps: AsyncStepsResource) -> None:
- self._steps = steps
-
- self.retrieve = async_to_streamed_response_wrapper(
- steps.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- steps.list,
- )
diff --git a/src/digitalocean_genai_sdk/resources/threads/threads.py b/src/digitalocean_genai_sdk/resources/threads/threads.py
deleted file mode 100644
index 64062ffb..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/threads.py
+++ /dev/null
@@ -1,553 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Iterable, Optional
-
-import httpx
-
-from ...types import thread_create_params, thread_update_params
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from .messages import (
- MessagesResource,
- AsyncMessagesResource,
- MessagesResourceWithRawResponse,
- AsyncMessagesResourceWithRawResponse,
- MessagesResourceWithStreamingResponse,
- AsyncMessagesResourceWithStreamingResponse,
-)
-from ..._compat import cached_property
-from .runs.runs import (
- RunsResource,
- AsyncRunsResource,
- RunsResourceWithRawResponse,
- AsyncRunsResourceWithRawResponse,
- RunsResourceWithStreamingResponse,
- AsyncRunsResourceWithStreamingResponse,
-)
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.thread_object import ThreadObject
-from ...types.thread_delete_response import ThreadDeleteResponse
-from ...types.threads.create_message_request_param import CreateMessageRequestParam
-
-__all__ = ["ThreadsResource", "AsyncThreadsResource"]
-
-
-class ThreadsResource(SyncAPIResource):
- @cached_property
- def runs(self) -> RunsResource:
- return RunsResource(self._client)
-
- @cached_property
- def messages(self) -> MessagesResource:
- return MessagesResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ThreadsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ThreadsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ThreadsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ThreadsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- messages: Iterable[CreateMessageRequestParam] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Create a thread.
-
- Args:
- messages: A list of [messages](/docs/api-reference/messages) to start the thread with.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- tool_resources: A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/threads",
- body=maybe_transform(
- {
- "messages": messages,
- "metadata": metadata,
- "tool_resources": tool_resources,
- },
- thread_create_params.ThreadCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- def retrieve(
- self,
- thread_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Retrieves a thread.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._get(
- f"/threads/{thread_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- def update(
- self,
- thread_id: str,
- *,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Modifies a thread.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- tool_resources: A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._post(
- f"/threads/{thread_id}",
- body=maybe_transform(
- {
- "metadata": metadata,
- "tool_resources": tool_resources,
- },
- thread_update_params.ThreadUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- def delete(
- self,
- thread_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadDeleteResponse:
- """
- Delete a thread.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._delete(
- f"/threads/{thread_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadDeleteResponse,
- )
-
-
-class AsyncThreadsResource(AsyncAPIResource):
- @cached_property
- def runs(self) -> AsyncRunsResource:
- return AsyncRunsResource(self._client)
-
- @cached_property
- def messages(self) -> AsyncMessagesResource:
- return AsyncMessagesResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncThreadsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncThreadsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncThreadsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncThreadsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- messages: Iterable[CreateMessageRequestParam] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Create a thread.
-
- Args:
- messages: A list of [messages](/docs/api-reference/messages) to start the thread with.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- tool_resources: A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/threads",
- body=await async_maybe_transform(
- {
- "messages": messages,
- "metadata": metadata,
- "tool_resources": tool_resources,
- },
- thread_create_params.ThreadCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- async def retrieve(
- self,
- thread_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Retrieves a thread.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._get(
- f"/threads/{thread_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- async def update(
- self,
- thread_id: str,
- *,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Modifies a thread.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- tool_resources: A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._post(
- f"/threads/{thread_id}",
- body=await async_maybe_transform(
- {
- "metadata": metadata,
- "tool_resources": tool_resources,
- },
- thread_update_params.ThreadUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- async def delete(
- self,
- thread_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadDeleteResponse:
- """
- Delete a thread.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._delete(
- f"/threads/{thread_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadDeleteResponse,
- )
-
-
-class ThreadsResourceWithRawResponse:
- def __init__(self, threads: ThreadsResource) -> None:
- self._threads = threads
-
- self.create = to_raw_response_wrapper(
- threads.create,
- )
- self.retrieve = to_raw_response_wrapper(
- threads.retrieve,
- )
- self.update = to_raw_response_wrapper(
- threads.update,
- )
- self.delete = to_raw_response_wrapper(
- threads.delete,
- )
-
- @cached_property
- def runs(self) -> RunsResourceWithRawResponse:
- return RunsResourceWithRawResponse(self._threads.runs)
-
- @cached_property
- def messages(self) -> MessagesResourceWithRawResponse:
- return MessagesResourceWithRawResponse(self._threads.messages)
-
-
-class AsyncThreadsResourceWithRawResponse:
- def __init__(self, threads: AsyncThreadsResource) -> None:
- self._threads = threads
-
- self.create = async_to_raw_response_wrapper(
- threads.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- threads.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- threads.update,
- )
- self.delete = async_to_raw_response_wrapper(
- threads.delete,
- )
-
- @cached_property
- def runs(self) -> AsyncRunsResourceWithRawResponse:
- return AsyncRunsResourceWithRawResponse(self._threads.runs)
-
- @cached_property
- def messages(self) -> AsyncMessagesResourceWithRawResponse:
- return AsyncMessagesResourceWithRawResponse(self._threads.messages)
-
-
-class ThreadsResourceWithStreamingResponse:
- def __init__(self, threads: ThreadsResource) -> None:
- self._threads = threads
-
- self.create = to_streamed_response_wrapper(
- threads.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- threads.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- threads.update,
- )
- self.delete = to_streamed_response_wrapper(
- threads.delete,
- )
-
- @cached_property
- def runs(self) -> RunsResourceWithStreamingResponse:
- return RunsResourceWithStreamingResponse(self._threads.runs)
-
- @cached_property
- def messages(self) -> MessagesResourceWithStreamingResponse:
- return MessagesResourceWithStreamingResponse(self._threads.messages)
-
-
-class AsyncThreadsResourceWithStreamingResponse:
- def __init__(self, threads: AsyncThreadsResource) -> None:
- self._threads = threads
-
- self.create = async_to_streamed_response_wrapper(
- threads.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- threads.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- threads.update,
- )
- self.delete = async_to_streamed_response_wrapper(
- threads.delete,
- )
-
- @cached_property
- def runs(self) -> AsyncRunsResourceWithStreamingResponse:
- return AsyncRunsResourceWithStreamingResponse(self._threads.runs)
-
- @cached_property
- def messages(self) -> AsyncMessagesResourceWithStreamingResponse:
- return AsyncMessagesResourceWithStreamingResponse(self._threads.messages)
diff --git a/src/digitalocean_genai_sdk/resources/uploads.py b/src/digitalocean_genai_sdk/resources/uploads.py
deleted file mode 100644
index 30ba91b5..00000000
--- a/src/digitalocean_genai_sdk/resources/uploads.py
+++ /dev/null
@@ -1,573 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Mapping, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import upload_create_params, upload_add_part_params, upload_complete_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.upload import Upload
-from ..types.upload_add_part_response import UploadAddPartResponse
-
-__all__ = ["UploadsResource", "AsyncUploadsResource"]
-
-
-class UploadsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> UploadsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return UploadsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> UploadsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return UploadsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- bytes: int,
- filename: str,
- mime_type: str,
- purpose: Literal["assistants", "batch", "fine-tune", "vision"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """
- Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that
- you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an
- Upload can accept at most 8 GB in total and expires after an hour after you
- create it.
-
- Once you complete the Upload, we will create a
- [File](/docs/api-reference/files/object) object that contains all the parts you
- uploaded. This File is usable in the rest of our platform as a regular File
- object.
-
- For certain `purpose` values, the correct `mime_type` must be specified. Please
- refer to documentation for the
- [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files).
-
- For guidance on the proper filename extensions for each purpose, please follow
- the documentation on [creating a File](/docs/api-reference/files/create).
-
- Args:
- bytes: The number of bytes in the file you are uploading.
-
- filename: The name of the file to upload.
-
- mime_type: The MIME type of the file.
-
- This must fall within the supported MIME types for your file purpose. See the
- supported MIME types for assistants and vision.
-
- purpose: The intended purpose of the uploaded file.
-
- See the
- [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/uploads",
- body=maybe_transform(
- {
- "bytes": bytes,
- "filename": filename,
- "mime_type": mime_type,
- "purpose": purpose,
- },
- upload_create_params.UploadCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
- def add_part(
- self,
- upload_id: str,
- *,
- data: FileTypes,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UploadAddPartResponse:
- """
- Adds a [Part](/docs/api-reference/uploads/part-object) to an
- [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk
- of bytes from the file you are trying to upload.
-
- Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
- maximum of 8 GB.
-
- It is possible to add multiple Parts in parallel. You can decide the intended
- order of the Parts when you
- [complete the Upload](/docs/api-reference/uploads/complete).
-
- Args:
- data: The chunk of bytes for this Part.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- body = deepcopy_minimal({"data": data})
- files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- f"/uploads/{upload_id}/parts",
- body=maybe_transform(body, upload_add_part_params.UploadAddPartParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UploadAddPartResponse,
- )
-
- def cancel(
- self,
- upload_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """Cancels the Upload.
-
- No Parts may be added after an Upload is cancelled.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- return self._post(
- f"/uploads/{upload_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
- def complete(
- self,
- upload_id: str,
- *,
- part_ids: List[str],
- md5: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """
- Completes the [Upload](/docs/api-reference/uploads/object).
-
- Within the returned Upload object, there is a nested
- [File](/docs/api-reference/files/object) object that is ready to use in the rest
- of the platform.
-
- You can specify the order of the Parts by passing in an ordered list of the Part
- IDs.
-
- The number of bytes uploaded upon completion must match the number of bytes
- initially specified when creating the Upload object. No Parts may be added after
- an Upload is completed.
-
- Args:
- part_ids: The ordered list of Part IDs.
-
- md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
- matches what you expect.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- return self._post(
- f"/uploads/{upload_id}/complete",
- body=maybe_transform(
- {
- "part_ids": part_ids,
- "md5": md5,
- },
- upload_complete_params.UploadCompleteParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
-
-class AsyncUploadsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncUploadsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncUploadsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncUploadsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncUploadsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- bytes: int,
- filename: str,
- mime_type: str,
- purpose: Literal["assistants", "batch", "fine-tune", "vision"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """
- Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that
- you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an
- Upload can accept at most 8 GB in total and expires after an hour after you
- create it.
-
- Once you complete the Upload, we will create a
- [File](/docs/api-reference/files/object) object that contains all the parts you
- uploaded. This File is usable in the rest of our platform as a regular File
- object.
-
- For certain `purpose` values, the correct `mime_type` must be specified. Please
- refer to documentation for the
- [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files).
-
- For guidance on the proper filename extensions for each purpose, please follow
- the documentation on [creating a File](/docs/api-reference/files/create).
-
- Args:
- bytes: The number of bytes in the file you are uploading.
-
- filename: The name of the file to upload.
-
- mime_type: The MIME type of the file.
-
- This must fall within the supported MIME types for your file purpose. See the
- supported MIME types for assistants and vision.
-
- purpose: The intended purpose of the uploaded file.
-
- See the
- [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/uploads",
- body=await async_maybe_transform(
- {
- "bytes": bytes,
- "filename": filename,
- "mime_type": mime_type,
- "purpose": purpose,
- },
- upload_create_params.UploadCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
- async def add_part(
- self,
- upload_id: str,
- *,
- data: FileTypes,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UploadAddPartResponse:
- """
- Adds a [Part](/docs/api-reference/uploads/part-object) to an
- [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk
- of bytes from the file you are trying to upload.
-
- Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
- maximum of 8 GB.
-
- It is possible to add multiple Parts in parallel. You can decide the intended
- order of the Parts when you
- [complete the Upload](/docs/api-reference/uploads/complete).
-
- Args:
- data: The chunk of bytes for this Part.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- body = deepcopy_minimal({"data": data})
- files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- f"/uploads/{upload_id}/parts",
- body=await async_maybe_transform(body, upload_add_part_params.UploadAddPartParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UploadAddPartResponse,
- )
-
- async def cancel(
- self,
- upload_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """Cancels the Upload.
-
- No Parts may be added after an Upload is cancelled.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- return await self._post(
- f"/uploads/{upload_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
- async def complete(
- self,
- upload_id: str,
- *,
- part_ids: List[str],
- md5: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """
- Completes the [Upload](/docs/api-reference/uploads/object).
-
- Within the returned Upload object, there is a nested
- [File](/docs/api-reference/files/object) object that is ready to use in the rest
- of the platform.
-
- You can specify the order of the Parts by passing in an ordered list of the Part
- IDs.
-
- The number of bytes uploaded upon completion must match the number of bytes
- initially specified when creating the Upload object. No Parts may be added after
- an Upload is completed.
-
- Args:
- part_ids: The ordered list of Part IDs.
-
- md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
- matches what you expect.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- return await self._post(
- f"/uploads/{upload_id}/complete",
- body=await async_maybe_transform(
- {
- "part_ids": part_ids,
- "md5": md5,
- },
- upload_complete_params.UploadCompleteParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
-
-class UploadsResourceWithRawResponse:
- def __init__(self, uploads: UploadsResource) -> None:
- self._uploads = uploads
-
- self.create = to_raw_response_wrapper(
- uploads.create,
- )
- self.add_part = to_raw_response_wrapper(
- uploads.add_part,
- )
- self.cancel = to_raw_response_wrapper(
- uploads.cancel,
- )
- self.complete = to_raw_response_wrapper(
- uploads.complete,
- )
-
-
-class AsyncUploadsResourceWithRawResponse:
- def __init__(self, uploads: AsyncUploadsResource) -> None:
- self._uploads = uploads
-
- self.create = async_to_raw_response_wrapper(
- uploads.create,
- )
- self.add_part = async_to_raw_response_wrapper(
- uploads.add_part,
- )
- self.cancel = async_to_raw_response_wrapper(
- uploads.cancel,
- )
- self.complete = async_to_raw_response_wrapper(
- uploads.complete,
- )
-
-
-class UploadsResourceWithStreamingResponse:
- def __init__(self, uploads: UploadsResource) -> None:
- self._uploads = uploads
-
- self.create = to_streamed_response_wrapper(
- uploads.create,
- )
- self.add_part = to_streamed_response_wrapper(
- uploads.add_part,
- )
- self.cancel = to_streamed_response_wrapper(
- uploads.cancel,
- )
- self.complete = to_streamed_response_wrapper(
- uploads.complete,
- )
-
-
-class AsyncUploadsResourceWithStreamingResponse:
- def __init__(self, uploads: AsyncUploadsResource) -> None:
- self._uploads = uploads
-
- self.create = async_to_streamed_response_wrapper(
- uploads.create,
- )
- self.add_part = async_to_streamed_response_wrapper(
- uploads.add_part,
- )
- self.cancel = async_to_streamed_response_wrapper(
- uploads.cancel,
- )
- self.complete = async_to_streamed_response_wrapper(
- uploads.complete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py b/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py
deleted file mode 100644
index a754f147..00000000
--- a/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .files import (
- FilesResource,
- AsyncFilesResource,
- FilesResourceWithRawResponse,
- AsyncFilesResourceWithRawResponse,
- FilesResourceWithStreamingResponse,
- AsyncFilesResourceWithStreamingResponse,
-)
-from .file_batches import (
- FileBatchesResource,
- AsyncFileBatchesResource,
- FileBatchesResourceWithRawResponse,
- AsyncFileBatchesResourceWithRawResponse,
- FileBatchesResourceWithStreamingResponse,
- AsyncFileBatchesResourceWithStreamingResponse,
-)
-from .vector_stores import (
- VectorStoresResource,
- AsyncVectorStoresResource,
- VectorStoresResourceWithRawResponse,
- AsyncVectorStoresResourceWithRawResponse,
- VectorStoresResourceWithStreamingResponse,
- AsyncVectorStoresResourceWithStreamingResponse,
-)
-
-__all__ = [
- "FileBatchesResource",
- "AsyncFileBatchesResource",
- "FileBatchesResourceWithRawResponse",
- "AsyncFileBatchesResourceWithRawResponse",
- "FileBatchesResourceWithStreamingResponse",
- "AsyncFileBatchesResourceWithStreamingResponse",
- "FilesResource",
- "AsyncFilesResource",
- "FilesResourceWithRawResponse",
- "AsyncFilesResourceWithRawResponse",
- "FilesResourceWithStreamingResponse",
- "AsyncFilesResourceWithStreamingResponse",
- "VectorStoresResource",
- "AsyncVectorStoresResource",
- "VectorStoresResourceWithRawResponse",
- "AsyncVectorStoresResourceWithRawResponse",
- "VectorStoresResourceWithStreamingResponse",
- "AsyncVectorStoresResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py b/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py
deleted file mode 100644
index 0c4334ce..00000000
--- a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py
+++ /dev/null
@@ -1,544 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.vector_stores import ChunkingStrategyRequestParam, file_batch_create_params, file_batch_list_files_params
-from ...types.vector_stores.vector_store_file_batch_object import VectorStoreFileBatchObject
-from ...types.vector_stores.chunking_strategy_request_param import ChunkingStrategyRequestParam
-from ...types.vector_stores.list_vector_store_files_response import ListVectorStoreFilesResponse
-
-__all__ = ["FileBatchesResource", "AsyncFileBatchesResource"]
-
-
-class FileBatchesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> FileBatchesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return FileBatchesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FileBatchesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return FileBatchesResourceWithStreamingResponse(self)
-
- def create(
- self,
- vector_store_id: str,
- *,
- file_ids: List[str],
- attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
- chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """
- Create a vector store file batch.
-
- Args:
- file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should
- use. Useful for tools like `file_search` that can access files.
-
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/file_batches",
- body=maybe_transform(
- {
- "file_ids": file_ids,
- "attributes": attributes,
- "chunking_strategy": chunking_strategy,
- },
- file_batch_create_params.FileBatchCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- def retrieve(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """
- Retrieves a vector store file batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- def cancel(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """Cancel a vector store file batch.
-
- This attempts to cancel the processing of
- files in this batch as soon as possible.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- def list_files(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListVectorStoreFilesResponse:
- """
- Returns a list of vector store files in a batch.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "filter": filter,
- "limit": limit,
- "order": order,
- },
- file_batch_list_files_params.FileBatchListFilesParams,
- ),
- ),
- cast_to=ListVectorStoreFilesResponse,
- )
-
-
-class AsyncFileBatchesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncFileBatchesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncFileBatchesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFileBatchesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncFileBatchesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- vector_store_id: str,
- *,
- file_ids: List[str],
- attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
- chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """
- Create a vector store file batch.
-
- Args:
- file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should
- use. Useful for tools like `file_search` that can access files.
-
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/file_batches",
- body=await async_maybe_transform(
- {
- "file_ids": file_ids,
- "attributes": attributes,
- "chunking_strategy": chunking_strategy,
- },
- file_batch_create_params.FileBatchCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- async def retrieve(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """
- Retrieves a vector store file batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- async def cancel(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """Cancel a vector store file batch.
-
- This attempts to cancel the processing of
- files in this batch as soon as possible.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- async def list_files(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListVectorStoreFilesResponse:
- """
- Returns a list of vector store files in a batch.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "filter": filter,
- "limit": limit,
- "order": order,
- },
- file_batch_list_files_params.FileBatchListFilesParams,
- ),
- ),
- cast_to=ListVectorStoreFilesResponse,
- )
-
-
-class FileBatchesResourceWithRawResponse:
- def __init__(self, file_batches: FileBatchesResource) -> None:
- self._file_batches = file_batches
-
- self.create = to_raw_response_wrapper(
- file_batches.create,
- )
- self.retrieve = to_raw_response_wrapper(
- file_batches.retrieve,
- )
- self.cancel = to_raw_response_wrapper(
- file_batches.cancel,
- )
- self.list_files = to_raw_response_wrapper(
- file_batches.list_files,
- )
-
-
-class AsyncFileBatchesResourceWithRawResponse:
- def __init__(self, file_batches: AsyncFileBatchesResource) -> None:
- self._file_batches = file_batches
-
- self.create = async_to_raw_response_wrapper(
- file_batches.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- file_batches.retrieve,
- )
- self.cancel = async_to_raw_response_wrapper(
- file_batches.cancel,
- )
- self.list_files = async_to_raw_response_wrapper(
- file_batches.list_files,
- )
-
-
-class FileBatchesResourceWithStreamingResponse:
- def __init__(self, file_batches: FileBatchesResource) -> None:
- self._file_batches = file_batches
-
- self.create = to_streamed_response_wrapper(
- file_batches.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- file_batches.retrieve,
- )
- self.cancel = to_streamed_response_wrapper(
- file_batches.cancel,
- )
- self.list_files = to_streamed_response_wrapper(
- file_batches.list_files,
- )
-
-
-class AsyncFileBatchesResourceWithStreamingResponse:
- def __init__(self, file_batches: AsyncFileBatchesResource) -> None:
- self._file_batches = file_batches
-
- self.create = async_to_streamed_response_wrapper(
- file_batches.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- file_batches.retrieve,
- )
- self.cancel = async_to_streamed_response_wrapper(
- file_batches.cancel,
- )
- self.list_files = async_to_streamed_response_wrapper(
- file_batches.list_files,
- )
diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/files.py b/src/digitalocean_genai_sdk/resources/vector_stores/files.py
deleted file mode 100644
index c40d5b11..00000000
--- a/src/digitalocean_genai_sdk/resources/vector_stores/files.py
+++ /dev/null
@@ -1,733 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.vector_stores import (
- ChunkingStrategyRequestParam,
- file_list_params,
- file_create_params,
- file_update_params,
-)
-from ...types.vector_stores.file_delete_response import FileDeleteResponse
-from ...types.vector_stores.vector_store_file_object import VectorStoreFileObject
-from ...types.vector_stores.file_retrieve_content_response import FileRetrieveContentResponse
-from ...types.vector_stores.chunking_strategy_request_param import ChunkingStrategyRequestParam
-from ...types.vector_stores.list_vector_store_files_response import ListVectorStoreFilesResponse
-
-__all__ = ["FilesResource", "AsyncFilesResource"]
-
-
-class FilesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> FilesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return FilesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FilesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return FilesResourceWithStreamingResponse(self)
-
- def create(
- self,
- vector_store_id: str,
- *,
- file_id: str,
- attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
- chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Create a vector store file by attaching a [File](/docs/api-reference/files) to a
- [vector store](/docs/api-reference/vector-stores/object).
-
- Args:
- file_id: A [File](/docs/api-reference/files) ID that the vector store should use. Useful
- for tools like `file_search` that can access files.
-
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/files",
- body=maybe_transform(
- {
- "file_id": file_id,
- "attributes": attributes,
- "chunking_strategy": chunking_strategy,
- },
- file_create_params.FileCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- def retrieve(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Retrieves a vector store file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- def update(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- attributes: Optional[Dict[str, Union[str, float, bool]]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Update attributes on a vector store file.
-
- Args:
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- def list(
- self,
- vector_store_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListVectorStoreFilesResponse:
- """
- Returns a list of vector store files.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "filter": filter,
- "limit": limit,
- "order": order,
- },
- file_list_params.FileListParams,
- ),
- ),
- cast_to=ListVectorStoreFilesResponse,
- )
-
- def delete(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """Delete a vector store file.
-
- This will remove the file from the vector store but
- the file itself will not be deleted. To delete the file, use the
- [delete file](/docs/api-reference/files/delete) endpoint.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._delete(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileDeleteResponse,
- )
-
- def retrieve_content(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileRetrieveContentResponse:
- """
- Retrieve the parsed contents of a vector store file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}/content",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileRetrieveContentResponse,
- )
-
-
-class AsyncFilesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncFilesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncFilesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncFilesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- vector_store_id: str,
- *,
- file_id: str,
- attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
- chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Create a vector store file by attaching a [File](/docs/api-reference/files) to a
- [vector store](/docs/api-reference/vector-stores/object).
-
- Args:
- file_id: A [File](/docs/api-reference/files) ID that the vector store should use. Useful
- for tools like `file_search` that can access files.
-
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/files",
- body=await async_maybe_transform(
- {
- "file_id": file_id,
- "attributes": attributes,
- "chunking_strategy": chunking_strategy,
- },
- file_create_params.FileCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- async def retrieve(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Retrieves a vector store file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- async def update(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- attributes: Optional[Dict[str, Union[str, float, bool]]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Update attributes on a vector store file.
-
- Args:
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- async def list(
- self,
- vector_store_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListVectorStoreFilesResponse:
- """
- Returns a list of vector store files.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "filter": filter,
- "limit": limit,
- "order": order,
- },
- file_list_params.FileListParams,
- ),
- ),
- cast_to=ListVectorStoreFilesResponse,
- )
-
- async def delete(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """Delete a vector store file.
-
- This will remove the file from the vector store but
- the file itself will not be deleted. To delete the file, use the
- [delete file](/docs/api-reference/files/delete) endpoint.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._delete(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileDeleteResponse,
- )
-
- async def retrieve_content(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileRetrieveContentResponse:
- """
- Retrieve the parsed contents of a vector store file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}/content",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileRetrieveContentResponse,
- )
-
-
-class FilesResourceWithRawResponse:
- def __init__(self, files: FilesResource) -> None:
- self._files = files
-
- self.create = to_raw_response_wrapper(
- files.create,
- )
- self.retrieve = to_raw_response_wrapper(
- files.retrieve,
- )
- self.update = to_raw_response_wrapper(
- files.update,
- )
- self.list = to_raw_response_wrapper(
- files.list,
- )
- self.delete = to_raw_response_wrapper(
- files.delete,
- )
- self.retrieve_content = to_raw_response_wrapper(
- files.retrieve_content,
- )
-
-
-class AsyncFilesResourceWithRawResponse:
- def __init__(self, files: AsyncFilesResource) -> None:
- self._files = files
-
- self.create = async_to_raw_response_wrapper(
- files.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- files.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- files.update,
- )
- self.list = async_to_raw_response_wrapper(
- files.list,
- )
- self.delete = async_to_raw_response_wrapper(
- files.delete,
- )
- self.retrieve_content = async_to_raw_response_wrapper(
- files.retrieve_content,
- )
-
-
-class FilesResourceWithStreamingResponse:
- def __init__(self, files: FilesResource) -> None:
- self._files = files
-
- self.create = to_streamed_response_wrapper(
- files.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- files.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- files.update,
- )
- self.list = to_streamed_response_wrapper(
- files.list,
- )
- self.delete = to_streamed_response_wrapper(
- files.delete,
- )
- self.retrieve_content = to_streamed_response_wrapper(
- files.retrieve_content,
- )
-
-
-class AsyncFilesResourceWithStreamingResponse:
- def __init__(self, files: AsyncFilesResource) -> None:
- self._files = files
-
- self.create = async_to_streamed_response_wrapper(
- files.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- files.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- files.update,
- )
- self.list = async_to_streamed_response_wrapper(
- files.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- files.delete,
- )
- self.retrieve_content = async_to_streamed_response_wrapper(
- files.retrieve_content,
- )
diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py b/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py
deleted file mode 100644
index 8ad572ea..00000000
--- a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py
+++ /dev/null
@@ -1,847 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from .files import (
- FilesResource,
- AsyncFilesResource,
- FilesResourceWithRawResponse,
- AsyncFilesResourceWithRawResponse,
- FilesResourceWithStreamingResponse,
- AsyncFilesResourceWithStreamingResponse,
-)
-from ...types import (
- vector_store_list_params,
- vector_store_create_params,
- vector_store_search_params,
- vector_store_update_params,
-)
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .file_batches import (
- FileBatchesResource,
- AsyncFileBatchesResource,
- FileBatchesResourceWithRawResponse,
- AsyncFileBatchesResourceWithRawResponse,
- FileBatchesResourceWithStreamingResponse,
- AsyncFileBatchesResourceWithStreamingResponse,
-)
-from ..._base_client import make_request_options
-from ...types.vector_store_object import VectorStoreObject
-from ...types.vector_store_list_response import VectorStoreListResponse
-from ...types.vector_store_delete_response import VectorStoreDeleteResponse
-from ...types.vector_store_search_response import VectorStoreSearchResponse
-from ...types.vector_store_expiration_after_param import VectorStoreExpirationAfterParam
-
-__all__ = ["VectorStoresResource", "AsyncVectorStoresResource"]
-
-
-class VectorStoresResource(SyncAPIResource):
- @cached_property
- def file_batches(self) -> FileBatchesResource:
- return FileBatchesResource(self._client)
-
- @cached_property
- def files(self) -> FilesResource:
- return FilesResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> VectorStoresResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return VectorStoresResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> VectorStoresResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return VectorStoresResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
- expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN,
- file_ids: List[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Create a vector store.
-
- Args:
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy. Only applicable if `file_ids` is non-empty.
-
- expires_after: The expiration policy for a vector store.
-
- file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should
- use. Useful for tools like `file_search` that can access files.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the vector store.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/vector_stores",
- body=maybe_transform(
- {
- "chunking_strategy": chunking_strategy,
- "expires_after": expires_after,
- "file_ids": file_ids,
- "metadata": metadata,
- "name": name,
- },
- vector_store_create_params.VectorStoreCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- def retrieve(
- self,
- vector_store_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Retrieves a vector store.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- def update(
- self,
- vector_store_id: str,
- *,
- expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Modifies a vector store.
-
- Args:
- expires_after: The expiration policy for a vector store.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the vector store.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}",
- body=maybe_transform(
- {
- "expires_after": expires_after,
- "metadata": metadata,
- "name": name,
- },
- vector_store_update_params.VectorStoreUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreListResponse:
- """Returns a list of vector stores.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/vector_stores",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- vector_store_list_params.VectorStoreListParams,
- ),
- ),
- cast_to=VectorStoreListResponse,
- )
-
- def delete(
- self,
- vector_store_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreDeleteResponse:
- """
- Delete a vector store.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._delete(
- f"/vector_stores/{vector_store_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreDeleteResponse,
- )
-
- def search(
- self,
- vector_store_id: str,
- *,
- query: Union[str, List[str]],
- filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
- max_num_results: int | NotGiven = NOT_GIVEN,
- ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
- rewrite_query: bool | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreSearchResponse:
- """
- Search a vector store for relevant chunks based on a query and file attributes
- filter.
-
- Args:
- query: A query string for a search
-
- filters: A filter to apply based on file attributes.
-
- max_num_results: The maximum number of results to return. This number should be between 1 and 50
- inclusive.
-
- ranking_options: Ranking options for search.
-
- rewrite_query: Whether to rewrite the natural language query for vector search.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/search",
- body=maybe_transform(
- {
- "query": query,
- "filters": filters,
- "max_num_results": max_num_results,
- "ranking_options": ranking_options,
- "rewrite_query": rewrite_query,
- },
- vector_store_search_params.VectorStoreSearchParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreSearchResponse,
- )
-
-
-class AsyncVectorStoresResource(AsyncAPIResource):
- @cached_property
- def file_batches(self) -> AsyncFileBatchesResource:
- return AsyncFileBatchesResource(self._client)
-
- @cached_property
- def files(self) -> AsyncFilesResource:
- return AsyncFilesResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncVectorStoresResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncVectorStoresResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncVectorStoresResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncVectorStoresResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
- expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN,
- file_ids: List[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Create a vector store.
-
- Args:
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy. Only applicable if `file_ids` is non-empty.
-
- expires_after: The expiration policy for a vector store.
-
- file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should
- use. Useful for tools like `file_search` that can access files.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the vector store.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/vector_stores",
- body=await async_maybe_transform(
- {
- "chunking_strategy": chunking_strategy,
- "expires_after": expires_after,
- "file_ids": file_ids,
- "metadata": metadata,
- "name": name,
- },
- vector_store_create_params.VectorStoreCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- async def retrieve(
- self,
- vector_store_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Retrieves a vector store.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- async def update(
- self,
- vector_store_id: str,
- *,
- expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Modifies a vector store.
-
- Args:
- expires_after: The expiration policy for a vector store.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the vector store.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}",
- body=await async_maybe_transform(
- {
- "expires_after": expires_after,
- "metadata": metadata,
- "name": name,
- },
- vector_store_update_params.VectorStoreUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreListResponse:
- """Returns a list of vector stores.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/vector_stores",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- vector_store_list_params.VectorStoreListParams,
- ),
- ),
- cast_to=VectorStoreListResponse,
- )
-
- async def delete(
- self,
- vector_store_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreDeleteResponse:
- """
- Delete a vector store.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._delete(
- f"/vector_stores/{vector_store_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreDeleteResponse,
- )
-
- async def search(
- self,
- vector_store_id: str,
- *,
- query: Union[str, List[str]],
- filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
- max_num_results: int | NotGiven = NOT_GIVEN,
- ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
- rewrite_query: bool | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreSearchResponse:
- """
- Search a vector store for relevant chunks based on a query and file attributes
- filter.
-
- Args:
- query: A query string for a search
-
- filters: A filter to apply based on file attributes.
-
- max_num_results: The maximum number of results to return. This number should be between 1 and 50
- inclusive.
-
- ranking_options: Ranking options for search.
-
- rewrite_query: Whether to rewrite the natural language query for vector search.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/search",
- body=await async_maybe_transform(
- {
- "query": query,
- "filters": filters,
- "max_num_results": max_num_results,
- "ranking_options": ranking_options,
- "rewrite_query": rewrite_query,
- },
- vector_store_search_params.VectorStoreSearchParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreSearchResponse,
- )
-
-
-class VectorStoresResourceWithRawResponse:
- def __init__(self, vector_stores: VectorStoresResource) -> None:
- self._vector_stores = vector_stores
-
- self.create = to_raw_response_wrapper(
- vector_stores.create,
- )
- self.retrieve = to_raw_response_wrapper(
- vector_stores.retrieve,
- )
- self.update = to_raw_response_wrapper(
- vector_stores.update,
- )
- self.list = to_raw_response_wrapper(
- vector_stores.list,
- )
- self.delete = to_raw_response_wrapper(
- vector_stores.delete,
- )
- self.search = to_raw_response_wrapper(
- vector_stores.search,
- )
-
- @cached_property
- def file_batches(self) -> FileBatchesResourceWithRawResponse:
- return FileBatchesResourceWithRawResponse(self._vector_stores.file_batches)
-
- @cached_property
- def files(self) -> FilesResourceWithRawResponse:
- return FilesResourceWithRawResponse(self._vector_stores.files)
-
-
-class AsyncVectorStoresResourceWithRawResponse:
- def __init__(self, vector_stores: AsyncVectorStoresResource) -> None:
- self._vector_stores = vector_stores
-
- self.create = async_to_raw_response_wrapper(
- vector_stores.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- vector_stores.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- vector_stores.update,
- )
- self.list = async_to_raw_response_wrapper(
- vector_stores.list,
- )
- self.delete = async_to_raw_response_wrapper(
- vector_stores.delete,
- )
- self.search = async_to_raw_response_wrapper(
- vector_stores.search,
- )
-
- @cached_property
- def file_batches(self) -> AsyncFileBatchesResourceWithRawResponse:
- return AsyncFileBatchesResourceWithRawResponse(self._vector_stores.file_batches)
-
- @cached_property
- def files(self) -> AsyncFilesResourceWithRawResponse:
- return AsyncFilesResourceWithRawResponse(self._vector_stores.files)
-
-
-class VectorStoresResourceWithStreamingResponse:
- def __init__(self, vector_stores: VectorStoresResource) -> None:
- self._vector_stores = vector_stores
-
- self.create = to_streamed_response_wrapper(
- vector_stores.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- vector_stores.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- vector_stores.update,
- )
- self.list = to_streamed_response_wrapper(
- vector_stores.list,
- )
- self.delete = to_streamed_response_wrapper(
- vector_stores.delete,
- )
- self.search = to_streamed_response_wrapper(
- vector_stores.search,
- )
-
- @cached_property
- def file_batches(self) -> FileBatchesResourceWithStreamingResponse:
- return FileBatchesResourceWithStreamingResponse(self._vector_stores.file_batches)
-
- @cached_property
- def files(self) -> FilesResourceWithStreamingResponse:
- return FilesResourceWithStreamingResponse(self._vector_stores.files)
-
-
-class AsyncVectorStoresResourceWithStreamingResponse:
- def __init__(self, vector_stores: AsyncVectorStoresResource) -> None:
- self._vector_stores = vector_stores
-
- self.create = async_to_streamed_response_wrapper(
- vector_stores.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- vector_stores.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- vector_stores.update,
- )
- self.list = async_to_streamed_response_wrapper(
- vector_stores.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- vector_stores.delete,
- )
- self.search = async_to_streamed_response_wrapper(
- vector_stores.search,
- )
-
- @cached_property
- def file_batches(self) -> AsyncFileBatchesResourceWithStreamingResponse:
- return AsyncFileBatchesResourceWithStreamingResponse(self._vector_stores.file_batches)
-
- @cached_property
- def files(self) -> AsyncFilesResourceWithStreamingResponse:
- return AsyncFilesResourceWithStreamingResponse(self._vector_stores.files)
diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py
index 49c8d424..144bfd42 100644
--- a/src/digitalocean_genai_sdk/types/__init__.py
+++ b/src/digitalocean_genai_sdk/types/__init__.py
@@ -2,135 +2,9 @@
from __future__ import annotations
-from .batch import Batch as Batch
from .model import Model as Model
-from .upload import Upload as Upload
-from .response import Response as Response
-from .includable import Includable as Includable
-from .openai_file import OpenAIFile as OpenAIFile
-from .input_content import InputContent as InputContent
-from .input_message import InputMessage as InputMessage
-from .thread_object import ThreadObject as ThreadObject
-from .output_message import OutputMessage as OutputMessage
-from .reasoning_item import ReasoningItem as ReasoningItem
-from .usage_response import UsageResponse as UsageResponse
-from .compound_filter import CompoundFilter as CompoundFilter
-from .function_object import FunctionObject as FunctionObject
-from .images_response import ImagesResponse as ImagesResponse
-from .assistant_object import AssistantObject as AssistantObject
-from .file_list_params import FileListParams as FileListParams
-from .reasoning_effort import ReasoningEffort as ReasoningEffort
-from .voice_ids_shared import VoiceIDsShared as VoiceIDsShared
-from .batch_list_params import BatchListParams as BatchListParams
-from .comparison_filter import ComparisonFilter as ComparisonFilter
-from .computer_tool_call import ComputerToolCall as ComputerToolCall
-from .file_list_response import FileListResponse as FileListResponse
-from .file_search_ranker import FileSearchRanker as FileSearchRanker
-from .file_upload_params import FileUploadParams as FileUploadParams
-from .function_tool_call import FunctionToolCall as FunctionToolCall
-from .batch_create_params import BatchCreateParams as BatchCreateParams
-from .batch_list_response import BatchListResponse as BatchListResponse
-from .input_content_param import InputContentParam as InputContentParam
-from .input_message_param import InputMessageParam as InputMessageParam
from .model_list_response import ModelListResponse as ModelListResponse
-from .response_properties import ResponseProperties as ResponseProperties
-from .vector_store_object import VectorStoreObject as VectorStoreObject
-from .assistant_tools_code import AssistantToolsCode as AssistantToolsCode
-from .audit_log_actor_user import AuditLogActorUser as AuditLogActorUser
-from .audit_log_event_type import AuditLogEventType as AuditLogEventType
-from .file_delete_response import FileDeleteResponse as FileDeleteResponse
-from .output_message_param import OutputMessageParam as OutputMessageParam
-from .reasoning_item_param import ReasoningItemParam as ReasoningItemParam
-from .thread_create_params import ThreadCreateParams as ThreadCreateParams
-from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams
-from .upload_create_params import UploadCreateParams as UploadCreateParams
-from .web_search_tool_call import WebSearchToolCall as WebSearchToolCall
-from .assistant_list_params import AssistantListParams as AssistantListParams
-from .compound_filter_param import CompoundFilterParam as CompoundFilterParam
-from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall
-from .function_object_param import FunctionObjectParam as FunctionObjectParam
-from .model_delete_response import ModelDeleteResponse as ModelDeleteResponse
-from .transcription_segment import TranscriptionSegment as TranscriptionSegment
-from .response_create_params import ResponseCreateParams as ResponseCreateParams
-from .thread_delete_response import ThreadDeleteResponse as ThreadDeleteResponse
-from .upload_add_part_params import UploadAddPartParams as UploadAddPartParams
-from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
-from .voice_ids_shared_param import VoiceIDsSharedParam as VoiceIDsSharedParam
-from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
-from .assistant_list_response import AssistantListResponse as AssistantListResponse
-from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
-from .comparison_filter_param import ComparisonFilterParam as ComparisonFilterParam
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
-from .assistant_tools_function import AssistantToolsFunction as AssistantToolsFunction
-from .completion_create_params import CompletionCreateParams as CompletionCreateParams
-from .computer_tool_call_param import ComputerToolCallParam as ComputerToolCallParam
-from .function_tool_call_param import FunctionToolCallParam as FunctionToolCallParam
-from .image_create_edit_params import ImageCreateEditParams as ImageCreateEditParams
-from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams
-from .static_chunking_strategy import StaticChunkingStrategy as StaticChunkingStrategy
from .stop_configuration_param import StopConfigurationParam as StopConfigurationParam
-from .upload_add_part_response import UploadAddPartResponse as UploadAddPartResponse
-from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
-from .assistant_delete_response import AssistantDeleteResponse as AssistantDeleteResponse
-from .computer_tool_call_output import ComputerToolCallOutput as ComputerToolCallOutput
from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse
-from .function_tool_call_output import FunctionToolCallOutput as FunctionToolCallOutput
-from .model_response_properties import ModelResponseProperties as ModelResponseProperties
-from .assistant_supported_models import AssistantSupportedModels as AssistantSupportedModels
-from .assistant_tools_code_param import AssistantToolsCodeParam as AssistantToolsCodeParam
-from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
-from .moderation_classify_params import ModerationClassifyParams as ModerationClassifyParams
-from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
-from .vector_store_list_response import VectorStoreListResponse as VectorStoreListResponse
-from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
-from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
-from .web_search_tool_call_param import WebSearchToolCallParam as WebSearchToolCallParam
-from .assistant_tools_file_search import AssistantToolsFileSearch as AssistantToolsFileSearch
-from .create_thread_request_param import CreateThreadRequestParam as CreateThreadRequestParam
-from .file_search_tool_call_param import FileSearchToolCallParam as FileSearchToolCallParam
-from .audio_generate_speech_params import AudioGenerateSpeechParams as AudioGenerateSpeechParams
-from .audio_translate_audio_params import AudioTranslateAudioParams as AudioTranslateAudioParams
-from .moderation_classify_response import ModerationClassifyResponse as ModerationClassifyResponse
-from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse
-from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
-from .audio_transcribe_audio_params import AudioTranscribeAudioParams as AudioTranscribeAudioParams
-from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
-from .organization_get_costs_params import OrganizationGetCostsParams as OrganizationGetCostsParams
-from .vector_store_expiration_after import VectorStoreExpirationAfter as VectorStoreExpirationAfter
-from .assistant_tools_function_param import AssistantToolsFunctionParam as AssistantToolsFunctionParam
-from .audio_translate_audio_response import AudioTranslateAudioResponse as AudioTranslateAudioResponse
-from .file_retrieve_content_response import FileRetrieveContentResponse as FileRetrieveContentResponse
-from .image_create_generation_params import ImageCreateGenerationParams as ImageCreateGenerationParams
-from .realtime_create_session_params import RealtimeCreateSessionParams as RealtimeCreateSessionParams
-from .static_chunking_strategy_param import StaticChunkingStrategyParam as StaticChunkingStrategyParam
-from .audio_transcribe_audio_response import AudioTranscribeAudioResponse as AudioTranscribeAudioResponse
-from .computer_tool_call_output_param import ComputerToolCallOutputParam as ComputerToolCallOutputParam
-from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck as ComputerToolCallSafetyCheck
-from .function_tool_call_output_param import FunctionToolCallOutputParam as FunctionToolCallOutputParam
-from .realtime_create_session_response import RealtimeCreateSessionResponse as RealtimeCreateSessionResponse
-from .response_list_input_items_params import ResponseListInputItemsParams as ResponseListInputItemsParams
-from .assistant_tools_file_search_param import AssistantToolsFileSearchParam as AssistantToolsFileSearchParam
-from .response_list_input_items_response import ResponseListInputItemsResponse as ResponseListInputItemsResponse
-from .organization_list_audit_logs_params import OrganizationListAuditLogsParams as OrganizationListAuditLogsParams
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam as VectorStoreExpirationAfterParam
-from .auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam as AutoChunkingStrategyRequestParam
from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam
-from .assistants_api_response_format_option import (
- AssistantsAPIResponseFormatOption as AssistantsAPIResponseFormatOption,
-)
-from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam as ComputerToolCallSafetyCheckParam
-from .organization_list_audit_logs_response import (
- OrganizationListAuditLogsResponse as OrganizationListAuditLogsResponse,
-)
-from .static_chunking_strategy_request_param import (
- StaticChunkingStrategyRequestParam as StaticChunkingStrategyRequestParam,
-)
-from .assistants_api_response_format_option_param import (
- AssistantsAPIResponseFormatOptionParam as AssistantsAPIResponseFormatOptionParam,
-)
-from .realtime_create_transcription_session_params import (
- RealtimeCreateTranscriptionSessionParams as RealtimeCreateTranscriptionSessionParams,
-)
-from .realtime_create_transcription_session_response import (
- RealtimeCreateTranscriptionSessionResponse as RealtimeCreateTranscriptionSessionResponse,
-)
diff --git a/src/digitalocean_genai_sdk/types/assistant_create_params.py b/src/digitalocean_genai_sdk/types/assistant_create_params.py
deleted file mode 100644
index b89e4742..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_create_params.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .reasoning_effort import ReasoningEffort
-from .assistant_supported_models import AssistantSupportedModels
-from .assistant_tools_code_param import AssistantToolsCodeParam
-from .assistant_tools_function_param import AssistantToolsFunctionParam
-from .assistant_tools_file_search_param import AssistantToolsFileSearchParam
-from .assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = [
- "AssistantCreateParams",
- "ToolResources",
- "ToolResourcesCodeInterpreter",
- "ToolResourcesFileSearch",
- "ToolResourcesFileSearchVectorStore",
- "ToolResourcesFileSearchVectorStoreChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic",
- "Tool",
-]
-
-
-class AssistantCreateParams(TypedDict, total=False):
- model: Required[Union[str, AssistantSupportedModels]]
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- description: Optional[str]
- """The description of the assistant. The maximum length is 512 characters."""
-
- instructions: Optional[str]
- """The system instructions that the assistant uses.
-
- The maximum length is 256,000 characters.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- name: Optional[str]
- """The name of the assistant. The maximum length is 256 characters."""
-
- reasoning_effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOptionParam]
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- tool_resources: Optional[ToolResources]
- """A set of resources that are used by the assistant's tools.
-
- The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
- tools: Iterable[Tool]
- """A list of tool enabled on the assistant.
-
- There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `file_search`, or `function`.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False):
- type: Required[Literal["auto"]]
- """Always `auto`."""
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False):
- chunk_overlap_tokens: Required[int]
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: Required[int]
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False):
- static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic]
-
- type: Required[Literal["static"]]
- """Always `static`."""
-
-
-ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
- ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy,
- ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy,
-]
-
-
-class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
- chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
-
- file_ids: List[str]
- """A list of [file](/docs/api-reference/files) IDs to add to the vector store.
-
- There can be a maximum of 10000 files in a vector store.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- assistant. There can be a maximum of 1 vector store attached to the assistant.
- """
-
- vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
- """
- A helper to create a [vector store](/docs/api-reference/vector-stores/object)
- with file_ids and attach it to this assistant. There can be a maximum of 1
- vector store attached to the assistant.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
-
-
-Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/assistant_delete_response.py b/src/digitalocean_genai_sdk/types/assistant_delete_response.py
deleted file mode 100644
index 04207049..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["AssistantDeleteResponse"]
-
-
-class AssistantDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["assistant.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/assistant_list_params.py b/src/digitalocean_genai_sdk/types/assistant_list_params.py
deleted file mode 100644
index 834ffbca..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_list_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["AssistantListParams"]
-
-
-class AssistantListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/assistant_list_response.py b/src/digitalocean_genai_sdk/types/assistant_list_response.py
deleted file mode 100644
index dfc90bfa..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .assistant_object import AssistantObject
-
-__all__ = ["AssistantListResponse"]
-
-
-class AssistantListResponse(BaseModel):
- data: List[AssistantObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/assistant_object.py b/src/digitalocean_genai_sdk/types/assistant_object.py
deleted file mode 100644
index 4aa71ab9..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_object.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-from .assistant_tools_code import AssistantToolsCode
-from .assistant_tools_function import AssistantToolsFunction
-from .assistant_tools_file_search import AssistantToolsFileSearch
-from .assistants_api_response_format_option import AssistantsAPIResponseFormatOption
-
-__all__ = ["AssistantObject", "Tool", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
-
-Tool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearch, AssistantToolsFunction]
-
-
-class ToolResourcesCodeInterpreter(BaseModel):
- file_ids: Optional[List[str]] = None
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter`` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearch(BaseModel):
- vector_store_ids: Optional[List[str]] = None
- """
- The ID of the [vector store](/docs/api-reference/vector-stores/object) attached
- to this assistant. There can be a maximum of 1 vector store attached to the
- assistant.
- """
-
-
-class ToolResources(BaseModel):
- code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
-
- file_search: Optional[ToolResourcesFileSearch] = None
-
-
-class AssistantObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the assistant was created."""
-
- description: Optional[str] = None
- """The description of the assistant. The maximum length is 512 characters."""
-
- instructions: Optional[str] = None
- """The system instructions that the assistant uses.
-
- The maximum length is 256,000 characters.
- """
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: str
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- name: Optional[str] = None
- """The name of the assistant. The maximum length is 256 characters."""
-
- object: Literal["assistant"]
- """The object type, which is always `assistant`."""
-
- tools: List[Tool]
- """A list of tool enabled on the assistant.
-
- There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `file_search`, or `function`.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOption] = None
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- temperature: Optional[float] = None
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- tool_resources: Optional[ToolResources] = None
- """A set of resources that are used by the assistant's tools.
-
- The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
- top_p: Optional[float] = None
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
diff --git a/src/digitalocean_genai_sdk/types/assistant_supported_models.py b/src/digitalocean_genai_sdk/types/assistant_supported_models.py
deleted file mode 100644
index 999b7f23..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_supported_models.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["AssistantSupportedModels"]
-
-AssistantSupportedModels: TypeAlias = Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4.5-preview",
- "gpt-4.5-preview-2025-02-27",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
-]
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_code.py b/src/digitalocean_genai_sdk/types/assistant_tools_code.py
deleted file mode 100644
index 73a40a71..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_code.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["AssistantToolsCode"]
-
-
-class AssistantToolsCode(BaseModel):
- type: Literal["code_interpreter"]
- """The type of tool being defined: `code_interpreter`"""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py
deleted file mode 100644
index 01420dda..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["AssistantToolsCodeParam"]
-
-
-class AssistantToolsCodeParam(TypedDict, total=False):
- type: Required[Literal["code_interpreter"]]
- """The type of tool being defined: `code_interpreter`"""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py b/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py
deleted file mode 100644
index 3c834718..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .file_search_ranker import FileSearchRanker
-
-__all__ = ["AssistantToolsFileSearch", "FileSearch", "FileSearchRankingOptions"]
-
-
-class FileSearchRankingOptions(BaseModel):
- score_threshold: float
- """The score threshold for the file search.
-
- All values must be a floating point number between 0 and 1.
- """
-
- ranker: Optional[FileSearchRanker] = None
- """The ranker to use for the file search.
-
- If not specified will use the `auto` ranker.
- """
-
-
-class FileSearch(BaseModel):
- max_num_results: Optional[int] = None
- """The maximum number of results the file search tool should output.
-
- The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
- should be between 1 and 50 inclusive.
-
- Note that the file search tool may output fewer than `max_num_results` results.
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
- ranking_options: Optional[FileSearchRankingOptions] = None
- """The ranking options for the file search.
-
- If not specified, the file search tool will use the `auto` ranker and a
- score_threshold of 0.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
-
-class AssistantToolsFileSearch(BaseModel):
- type: Literal["file_search"]
- """The type of tool being defined: `file_search`"""
-
- file_search: Optional[FileSearch] = None
- """Overrides for the file search tool."""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py
deleted file mode 100644
index 3f0e5af4..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .file_search_ranker import FileSearchRanker
-
-__all__ = ["AssistantToolsFileSearchParam", "FileSearch", "FileSearchRankingOptions"]
-
-
-class FileSearchRankingOptions(TypedDict, total=False):
- score_threshold: Required[float]
- """The score threshold for the file search.
-
- All values must be a floating point number between 0 and 1.
- """
-
- ranker: FileSearchRanker
- """The ranker to use for the file search.
-
- If not specified will use the `auto` ranker.
- """
-
-
-class FileSearch(TypedDict, total=False):
- max_num_results: int
- """The maximum number of results the file search tool should output.
-
- The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
- should be between 1 and 50 inclusive.
-
- Note that the file search tool may output fewer than `max_num_results` results.
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
- ranking_options: FileSearchRankingOptions
- """The ranking options for the file search.
-
- If not specified, the file search tool will use the `auto` ranker and a
- score_threshold of 0.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
-
-class AssistantToolsFileSearchParam(TypedDict, total=False):
- type: Required[Literal["file_search"]]
- """The type of tool being defined: `file_search`"""
-
- file_search: FileSearch
- """Overrides for the file search tool."""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_function.py b/src/digitalocean_genai_sdk/types/assistant_tools_function.py
deleted file mode 100644
index 89326d54..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_function.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .function_object import FunctionObject
-
-__all__ = ["AssistantToolsFunction"]
-
-
-class AssistantToolsFunction(BaseModel):
- function: FunctionObject
-
- type: Literal["function"]
- """The type of tool being defined: `function`"""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py
deleted file mode 100644
index 4e9ecf3d..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .function_object_param import FunctionObjectParam
-
-__all__ = ["AssistantToolsFunctionParam"]
-
-
-class AssistantToolsFunctionParam(TypedDict, total=False):
- function: Required[FunctionObjectParam]
-
- type: Required[Literal["function"]]
- """The type of tool being defined: `function`"""
diff --git a/src/digitalocean_genai_sdk/types/assistant_update_params.py b/src/digitalocean_genai_sdk/types/assistant_update_params.py
deleted file mode 100644
index cf301dd4..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_update_params.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import TypeAlias, TypedDict
-
-from .reasoning_effort import ReasoningEffort
-from .assistant_supported_models import AssistantSupportedModels
-from .assistant_tools_code_param import AssistantToolsCodeParam
-from .assistant_tools_function_param import AssistantToolsFunctionParam
-from .assistant_tools_file_search_param import AssistantToolsFileSearchParam
-from .assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "Tool"]
-
-
-class AssistantUpdateParams(TypedDict, total=False):
- description: Optional[str]
- """The description of the assistant. The maximum length is 512 characters."""
-
- instructions: Optional[str]
- """The system instructions that the assistant uses.
-
- The maximum length is 256,000 characters.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: Union[str, AssistantSupportedModels]
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- name: Optional[str]
- """The name of the assistant. The maximum length is 256 characters."""
-
- reasoning_effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOptionParam]
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- tool_resources: Optional[ToolResources]
- """A set of resources that are used by the assistant's tools.
-
- The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
- tools: Iterable[Tool]
- """A list of tool enabled on the assistant.
-
- There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `file_search`, or `function`.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- Overrides the list of [file](/docs/api-reference/files) IDs made available to
- the `code_interpreter` tool. There can be a maximum of 20 files associated with
- the tool.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- Overrides the [vector store](/docs/api-reference/vector-stores/object) attached
- to this assistant. There can be a maximum of 1 vector store attached to the
- assistant.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
-
-
-Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py
deleted file mode 100644
index 07c4f71e..00000000
--- a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-from .chat.response_format_text import ResponseFormatText
-from .chat.response_format_json_object import ResponseFormatJsonObject
-from .chat.response_format_json_schema import ResponseFormatJsonSchema
-
-__all__ = ["AssistantsAPIResponseFormatOption"]
-
-AssistantsAPIResponseFormatOption: TypeAlias = Union[
- Literal["auto"], ResponseFormatText, ResponseFormatJsonObject, ResponseFormatJsonSchema
-]
diff --git a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py
deleted file mode 100644
index 7dbf967f..00000000
--- a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-from .chat.response_format_text_param import ResponseFormatTextParam
-from .chat.response_format_json_object_param import ResponseFormatJsonObjectParam
-from .chat.response_format_json_schema_param import ResponseFormatJsonSchemaParam
-
-__all__ = ["AssistantsAPIResponseFormatOptionParam"]
-
-AssistantsAPIResponseFormatOptionParam: TypeAlias = Union[
- Literal["auto"], ResponseFormatTextParam, ResponseFormatJsonObjectParam, ResponseFormatJsonSchemaParam
-]
diff --git a/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py b/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py
deleted file mode 100644
index 8857594a..00000000
--- a/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypedDict
-
-from .voice_ids_shared_param import VoiceIDsSharedParam
-
-__all__ = ["AudioGenerateSpeechParams"]
-
-
-class AudioGenerateSpeechParams(TypedDict, total=False):
- input: Required[str]
- """The text to generate audio for. The maximum length is 4096 characters."""
-
- model: Required[Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]]]
- """
- One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or
- `gpt-4o-mini-tts`.
- """
-
- voice: Required[VoiceIDsSharedParam]
- """The voice to use when generating the audio.
-
- Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`,
- `nova`, `sage`, `shimmer`, and `verse`. Previews of the voices are available in
- the [Text to speech guide](/docs/guides/text-to-speech#voice-options).
- """
-
- instructions: str
- """Control the voice of your generated audio with additional instructions.
-
- Does not work with `tts-1` or `tts-1-hd`.
- """
-
- response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
- """The format to audio in.
-
- Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
- """
-
- speed: float
- """The speed of the generated audio.
-
- Select a value from `0.25` to `4.0`. `1.0` is the default.
- """
diff --git a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py
deleted file mode 100644
index cbc15157..00000000
--- a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["AudioTranscribeAudioParams"]
-
-
-class AudioTranscribeAudioParams(TypedDict, total=False):
- file: Required[FileTypes]
- """
- The audio file object (not file name) to transcribe, in one of these formats:
- flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- """
-
- model: Required[Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]]]
- """ID of the model to use.
-
- The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1`
- (which is powered by our open source Whisper V2 model).
- """
-
- include: List[Literal["logprobs"]]
- """Additional information to include in the transcription response.
-
- `logprobs` will return the log probabilities of the tokens in the response to
- understand the model's confidence in the transcription. `logprobs` only works
- with response_format set to `json` and only with the models `gpt-4o-transcribe`
- and `gpt-4o-mini-transcribe`.
- """
-
- language: str
- """The language of the input audio.
-
- Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
- """
-
- prompt: str
- """An optional text to guide the model's style or continue a previous audio
- segment.
-
- The [prompt](/docs/guides/speech-to-text#prompting) should match the audio
- language.
- """
-
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
- """
- The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
- the only supported format is `json`.
- """
-
- stream: Optional[bool]
- """
- If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the
- [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
- for more information.
-
- Note: Streaming is not supported for the `whisper-1` model and will be ignored.
- """
-
- temperature: float
- """The sampling temperature, between 0 and 1.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
- """
-
- timestamp_granularities: List[Literal["word", "segment"]]
- """The timestamp granularities to populate for this transcription.
-
- `response_format` must be set `verbose_json` to use timestamp granularities.
- Either or both of these options are supported: `word`, or `segment`. Note: There
- is no additional latency for segment timestamps, but generating word timestamps
- incurs additional latency.
- """
diff --git a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py
deleted file mode 100644
index 54b999ed..00000000
--- a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import TypeAlias
-
-from .._models import BaseModel
-from .transcription_segment import TranscriptionSegment
-
-__all__ = [
- "AudioTranscribeAudioResponse",
- "CreateTranscriptionResponseJson",
- "CreateTranscriptionResponseJsonLogprob",
- "CreateTranscriptionResponseVerboseJson",
- "CreateTranscriptionResponseVerboseJsonWord",
-]
-
-
-class CreateTranscriptionResponseJsonLogprob(BaseModel):
- token: str
- """The token that was used to generate the log probability."""
-
- bytes: List[int]
- """The bytes that were used to generate the log probability."""
-
- logprob: float
- """The log probability of the token."""
-
-
-class CreateTranscriptionResponseJson(BaseModel):
- text: str
- """The transcribed text."""
-
- logprobs: Optional[List[CreateTranscriptionResponseJsonLogprob]] = None
- """The log probabilities of the tokens in the transcription.
-
- Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`
- if `logprobs` is added to the `include` array.
- """
-
-
-class CreateTranscriptionResponseVerboseJsonWord(BaseModel):
- end: float
- """End time of the word in seconds."""
-
- start: float
- """Start time of the word in seconds."""
-
- word: str
- """The text content of the word."""
-
-
-class CreateTranscriptionResponseVerboseJson(BaseModel):
- duration: float
- """The duration of the input audio."""
-
- language: str
- """The language of the input audio."""
-
- text: str
- """The transcribed text."""
-
- segments: Optional[List[TranscriptionSegment]] = None
- """Segments of the transcribed text and their corresponding details."""
-
- words: Optional[List[CreateTranscriptionResponseVerboseJsonWord]] = None
- """Extracted words and their corresponding timestamps."""
-
-
-AudioTranscribeAudioResponse: TypeAlias = Union[CreateTranscriptionResponseJson, CreateTranscriptionResponseVerboseJson]
diff --git a/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py b/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py
deleted file mode 100644
index cc222f14..00000000
--- a/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["AudioTranslateAudioParams"]
-
-
-class AudioTranslateAudioParams(TypedDict, total=False):
- file: Required[FileTypes]
- """
- The audio file object (not file name) translate, in one of these formats: flac,
- mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- """
-
- model: Required[Union[str, Literal["whisper-1"]]]
- """ID of the model to use.
-
- Only `whisper-1` (which is powered by our open source Whisper V2 model) is
- currently available.
- """
-
- prompt: str
- """An optional text to guide the model's style or continue a previous audio
- segment.
-
- The [prompt](/docs/guides/speech-to-text#prompting) should be in English.
- """
-
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
- """
- The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`.
- """
-
- temperature: float
- """The sampling temperature, between 0 and 1.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
- """
diff --git a/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py b/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py
deleted file mode 100644
index 74d08a73..00000000
--- a/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import TypeAlias
-
-from .._models import BaseModel
-from .transcription_segment import TranscriptionSegment
-
-__all__ = ["AudioTranslateAudioResponse", "CreateTranslationResponseJson", "CreateTranslationResponseVerboseJson"]
-
-
-class CreateTranslationResponseJson(BaseModel):
- text: str
-
-
-class CreateTranslationResponseVerboseJson(BaseModel):
- duration: float
- """The duration of the input audio."""
-
- language: str
- """The language of the output translation (always `english`)."""
-
- text: str
- """The translated text."""
-
- segments: Optional[List[TranscriptionSegment]] = None
- """Segments of the translated text and their corresponding details."""
-
-
-AudioTranslateAudioResponse: TypeAlias = Union[CreateTranslationResponseJson, CreateTranslationResponseVerboseJson]
diff --git a/src/digitalocean_genai_sdk/types/audit_log_actor_user.py b/src/digitalocean_genai_sdk/types/audit_log_actor_user.py
deleted file mode 100644
index f3da325d..00000000
--- a/src/digitalocean_genai_sdk/types/audit_log_actor_user.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from .._models import BaseModel
-
-__all__ = ["AuditLogActorUser"]
-
-
-class AuditLogActorUser(BaseModel):
- id: Optional[str] = None
- """The user id."""
-
- email: Optional[str] = None
- """The user email."""
diff --git a/src/digitalocean_genai_sdk/types/audit_log_event_type.py b/src/digitalocean_genai_sdk/types/audit_log_event_type.py
deleted file mode 100644
index 2031cbb8..00000000
--- a/src/digitalocean_genai_sdk/types/audit_log_event_type.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["AuditLogEventType"]
-
-AuditLogEventType: TypeAlias = Literal[
- "api_key.created",
- "api_key.updated",
- "api_key.deleted",
- "invite.sent",
- "invite.accepted",
- "invite.deleted",
- "login.succeeded",
- "login.failed",
- "logout.succeeded",
- "logout.failed",
- "organization.updated",
- "project.created",
- "project.updated",
- "project.archived",
- "service_account.created",
- "service_account.updated",
- "service_account.deleted",
- "rate_limit.updated",
- "rate_limit.deleted",
- "user.added",
- "user.updated",
- "user.deleted",
-]
diff --git a/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py
deleted file mode 100644
index 5c0c131e..00000000
--- a/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["AutoChunkingStrategyRequestParam"]
-
-
-class AutoChunkingStrategyRequestParam(TypedDict, total=False):
- type: Required[Literal["auto"]]
- """Always `auto`."""
diff --git a/src/digitalocean_genai_sdk/types/batch.py b/src/digitalocean_genai_sdk/types/batch.py
deleted file mode 100644
index 1fdd6928..00000000
--- a/src/digitalocean_genai_sdk/types/batch.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["Batch", "Errors", "ErrorsData", "RequestCounts"]
-
-
-class ErrorsData(BaseModel):
- code: Optional[str] = None
- """An error code identifying the error type."""
-
- line: Optional[int] = None
- """The line number of the input file where the error occurred, if applicable."""
-
- message: Optional[str] = None
- """A human-readable message providing more details about the error."""
-
- param: Optional[str] = None
- """The name of the parameter that caused the error, if applicable."""
-
-
-class Errors(BaseModel):
- data: Optional[List[ErrorsData]] = None
-
- object: Optional[str] = None
- """The object type, which is always `list`."""
-
-
-class RequestCounts(BaseModel):
- completed: int
- """Number of requests that have been completed successfully."""
-
- failed: int
- """Number of requests that have failed."""
-
- total: int
- """Total number of requests in the batch."""
-
-
-class Batch(BaseModel):
- id: str
-
- completion_window: str
- """The time frame within which the batch should be processed."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the batch was created."""
-
- endpoint: str
- """The OpenAI API endpoint used by the batch."""
-
- input_file_id: str
- """The ID of the input file for the batch."""
-
- object: Literal["batch"]
- """The object type, which is always `batch`."""
-
- status: Literal[
- "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled"
- ]
- """The current status of the batch."""
-
- cancelled_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch was cancelled."""
-
- cancelling_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch started cancelling."""
-
- completed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch was completed."""
-
- error_file_id: Optional[str] = None
- """The ID of the file containing the outputs of requests with errors."""
-
- errors: Optional[Errors] = None
-
- expired_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch expired."""
-
- expires_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch will expire."""
-
- failed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch failed."""
-
- finalizing_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch started finalizing."""
-
- in_progress_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch started processing."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- output_file_id: Optional[str] = None
- """The ID of the file containing the outputs of successfully executed requests."""
-
- request_counts: Optional[RequestCounts] = None
- """The request counts for different statuses within the batch."""
diff --git a/src/digitalocean_genai_sdk/types/batch_create_params.py b/src/digitalocean_genai_sdk/types/batch_create_params.py
deleted file mode 100644
index 08243244..00000000
--- a/src/digitalocean_genai_sdk/types/batch_create_params.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["BatchCreateParams"]
-
-
-class BatchCreateParams(TypedDict, total=False):
- completion_window: Required[Literal["24h"]]
- """The time frame within which the batch should be processed.
-
- Currently only `24h` is supported.
- """
-
- endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
- """The endpoint to be used for all requests in the batch.
-
- Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and
- `/v1/completions` are supported. Note that `/v1/embeddings` batches are also
- restricted to a maximum of 50,000 embedding inputs across all requests in the
- batch.
- """
-
- input_file_id: Required[str]
- """The ID of an uploaded file that contains requests for the new batch.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your input file must be formatted as a
- [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with
- the purpose `batch`. The file can contain up to 50,000 requests, and can be up
- to 200 MB in size.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/batch_list_params.py b/src/digitalocean_genai_sdk/types/batch_list_params.py
deleted file mode 100644
index ef5e966b..00000000
--- a/src/digitalocean_genai_sdk/types/batch_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["BatchListParams"]
-
-
-class BatchListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/batch_list_response.py b/src/digitalocean_genai_sdk/types/batch_list_response.py
deleted file mode 100644
index 87c4f9b8..00000000
--- a/src/digitalocean_genai_sdk/types/batch_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .batch import Batch
-from .._models import BaseModel
-
-__all__ = ["BatchListResponse"]
-
-
-class BatchListResponse(BaseModel):
- data: List[Batch]
-
- has_more: bool
-
- object: Literal["list"]
-
- first_id: Optional[str] = None
-
- last_id: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/chat/__init__.py b/src/digitalocean_genai_sdk/types/chat/__init__.py
index cfa8c56a..7dbba0c2 100644
--- a/src/digitalocean_genai_sdk/types/chat/__init__.py
+++ b/src/digitalocean_genai_sdk/types/chat/__init__.py
@@ -6,25 +6,7 @@
from .token_logprob import TokenLogprob as TokenLogprob
from .create_response import CreateResponse as CreateResponse
from .response_message import ResponseMessage as ResponseMessage
-from .message_tool_call import MessageToolCall as MessageToolCall
-from .web_search_location import WebSearchLocation as WebSearchLocation
-from .response_format_text import ResponseFormatText as ResponseFormatText
-from .completion_list_params import CompletionListParams as CompletionListParams
-from .model_ids_shared_param import ModelIDsSharedParam as ModelIDsSharedParam
-from .message_tool_call_param import MessageToolCallParam as MessageToolCallParam
-from .web_search_context_size import WebSearchContextSize as WebSearchContextSize
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
-from .completion_list_response import CompletionListResponse as CompletionListResponse
-from .completion_update_params import CompletionUpdateParams as CompletionUpdateParams
-from .web_search_location_param import WebSearchLocationParam as WebSearchLocationParam
-from .completion_delete_response import CompletionDeleteResponse as CompletionDeleteResponse
-from .response_format_text_param import ResponseFormatTextParam as ResponseFormatTextParam
-from .response_format_json_object import ResponseFormatJsonObject as ResponseFormatJsonObject
-from .response_format_json_schema import ResponseFormatJsonSchema as ResponseFormatJsonSchema
-from .completion_list_messages_params import CompletionListMessagesParams as CompletionListMessagesParams
-from .completion_list_messages_response import CompletionListMessagesResponse as CompletionListMessagesResponse
-from .response_format_json_object_param import ResponseFormatJsonObjectParam as ResponseFormatJsonObjectParam
-from .response_format_json_schema_param import ResponseFormatJsonSchemaParam as ResponseFormatJsonSchemaParam
from .request_message_content_part_text_param import (
RequestMessageContentPartTextParam as RequestMessageContentPartTextParam,
)
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py b/src/digitalocean_genai_sdk/types/chat/completion_create_params.py
index d11f9322..fcbf22bb 100644
--- a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py
+++ b/src/digitalocean_genai_sdk/types/chat/completion_create_params.py
@@ -2,81 +2,31 @@
from __future__ import annotations
-from typing import Dict, List, Union, Iterable, Optional
+from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
-from ..reasoning_effort import ReasoningEffort
-from ..function_object_param import FunctionObjectParam
-from .model_ids_shared_param import ModelIDsSharedParam
-from ..voice_ids_shared_param import VoiceIDsSharedParam
-from .message_tool_call_param import MessageToolCallParam
-from .web_search_context_size import WebSearchContextSize
from ..stop_configuration_param import StopConfigurationParam
-from .web_search_location_param import WebSearchLocationParam
-from .response_format_text_param import ResponseFormatTextParam
-from .response_format_json_object_param import ResponseFormatJsonObjectParam
-from .response_format_json_schema_param import ResponseFormatJsonSchemaParam
from ..chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from .request_message_content_part_text_param import RequestMessageContentPartTextParam
__all__ = [
"CompletionCreateParams",
"Message",
- "MessageChatCompletionRequestDeveloperMessage",
"MessageChatCompletionRequestSystemMessage",
+ "MessageChatCompletionRequestDeveloperMessage",
"MessageChatCompletionRequestUserMessage",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPart",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile",
"MessageChatCompletionRequestAssistantMessage",
- "MessageChatCompletionRequestAssistantMessageAudio",
"MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart",
"MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal",
- "MessageChatCompletionRequestAssistantMessageFunctionCall",
- "MessageChatCompletionRequestToolMessage",
- "MessageChatCompletionRequestFunctionMessage",
- "Audio",
- "FunctionCall",
- "FunctionCallChatCompletionFunctionCallOption",
- "Function",
- "Prediction",
- "ResponseFormat",
- "ToolChoice",
- "ToolChoiceChatCompletionNamedToolChoice",
- "ToolChoiceChatCompletionNamedToolChoiceFunction",
- "Tool",
- "WebSearchOptions",
- "WebSearchOptionsUserLocation",
]
class CompletionCreateParams(TypedDict, total=False):
messages: Required[Iterable[Message]]
- """A list of messages comprising the conversation so far.
-
- Depending on the [model](/docs/models) you use, different message types
- (modalities) are supported, like [text](/docs/guides/text-generation),
- [images](/docs/guides/vision), and [audio](/docs/guides/audio).
- """
-
- model: Required[ModelIDsSharedParam]
- """Model ID used to generate the response, like `gpt-4o` or `o1`.
-
- OpenAI offers a wide range of models with different capabilities, performance
- characteristics, and price points. Refer to the [model guide](/docs/models) to
- browse and compare available models.
- """
+ """A list of messages comprising the conversation so far."""
- audio: Optional[Audio]
- """Parameters for audio output.
-
- Required when audio output is requested with `modalities: ["audio"]`.
- [Learn more](/docs/guides/audio).
- """
+ model: Required[str]
+ """Model ID used to generate the response."""
frequency_penalty: Optional[float]
"""Number between -2.0 and 2.0.
@@ -85,29 +35,6 @@ class CompletionCreateParams(TypedDict, total=False):
text so far, decreasing the model's likelihood to repeat the same line verbatim.
"""
- function_call: FunctionCall
- """Deprecated in favor of `tool_choice`.
-
- Controls which (if any) function is called by the model.
-
- `none` means the model will not call a function and instead generates a message.
-
- `auto` means the model can pick between generating a message or calling a
- function.
-
- Specifying a particular function via `{"name": "my_function"}` forces the model
- to call that function.
-
- `none` is the default when no functions are present. `auto` is the default if
- functions are present.
- """
-
- functions: Iterable[Function]
- """Deprecated in favor of `tools`.
-
- A list of functions the model may generate JSON inputs for.
- """
-
logit_bias: Optional[Dict[str, int]]
"""Modify the likelihood of specified tokens appearing in the completion.
@@ -128,18 +55,16 @@ class CompletionCreateParams(TypedDict, total=False):
max_completion_tokens: Optional[int]
"""
- An upper bound for the number of tokens that can be generated for a completion,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
+ The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
"""
max_tokens: Optional[int]
- """
- The maximum number of [tokens](/tokenizer) that can be generated in the chat
- completion. This value can be used to control
- [costs](https://openai.com/api/pricing/) for text generated via API.
+ """The maximum number of tokens that can be generated in the completion.
- This value is now deprecated in favor of `max_completion_tokens`, and is not
- compatible with [o1 series models](/docs/guides/reasoning).
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
"""
metadata: Optional[Dict[str, str]]
@@ -152,20 +77,6 @@ class CompletionCreateParams(TypedDict, total=False):
a maximum length of 512 characters.
"""
- modalities: Optional[List[Literal["text", "audio"]]]
- """
- Output types that you would like the model to generate. Most models are capable
- of generating text, which is the default:
-
- `["text"]`
-
- The `gpt-4o-audio-preview` model can also be used to
- [generate audio](/docs/guides/audio). To request that this model generate both
- text and audio responses, you can use:
-
- `["text", "audio"]`
- """
-
n: Optional[int]
"""How many chat completion choices to generate for each input message.
@@ -173,19 +84,6 @@ class CompletionCreateParams(TypedDict, total=False):
of the choices. Keep `n` as `1` to minimize costs.
"""
- parallel_tool_calls: bool
- """
- Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
- """
-
- prediction: Optional[Prediction]
- """
- Static predicted output content, such as the content of a text file that is
- being regenerated.
- """
-
presence_penalty: Optional[float]
"""Number between -2.0 and 2.0.
@@ -193,76 +91,16 @@ class CompletionCreateParams(TypedDict, total=False):
far, increasing the model's likelihood to talk about new topics.
"""
- reasoning_effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- response_format: ResponseFormat
- """An object specifying the format that the model must output.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
- """
-
- seed: Optional[int]
- """
- This feature is in Beta. If specified, our system will make a best effort to
- sample deterministically, such that repeated requests with the same `seed` and
- parameters should return the same result. Determinism is not guaranteed, and you
- should refer to the `system_fingerprint` response parameter to monitor changes
- in the backend.
- """
-
- service_tier: Optional[Literal["auto", "default"]]
- """Specifies the latency tier to use for processing the request.
-
- This parameter is relevant for customers subscribed to the scale tier service:
-
- - If set to 'auto', and the Project is Scale tier enabled, the system will
- utilize scale tier credits until they are exhausted.
- - If set to 'auto', and the Project is not Scale tier enabled, the request will
- be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
- - If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
- - When not set, the default behavior is 'auto'.
-
- When this parameter is set, the response body will include the `service_tier`
- utilized.
- """
-
stop: Optional[StopConfigurationParam]
"""Up to 4 sequences where the API will stop generating further tokens.
The returned text will not contain the stop sequence.
"""
- store: Optional[bool]
- """
- Whether or not to store the output of this chat completion request for use in
- our [model distillation](/docs/guides/distillation) or
- [evals](/docs/guides/evals) products.
- """
-
stream: Optional[bool]
"""
If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/chat/streaming) for more
- information, along with the
- [streaming responses](/docs/guides/streaming-responses) guide for more
- information on how to handle the streaming events.
+ generated using server-sent events.
"""
stream_options: Optional[ChatCompletionStreamOptionsParam]
@@ -276,27 +114,6 @@ class CompletionCreateParams(TypedDict, total=False):
this or `top_p` but not both.
"""
- tool_choice: ToolChoice
- """
- Controls which (if any) tool is called by the model. `none` means the model will
- not call any tool and instead generates a message. `auto` means the model can
- pick between generating a message or calling one or more tools. `required` means
- the model must call one or more tools. Specifying a particular tool via
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- `none` is the default when no tools are present. `auto` is the default if tools
- are present.
- """
-
- tools: Iterable[Tool]
- """A list of tools the model may call.
-
- Currently, only functions are supported as a tool. Use this to provide a list of
- functions the model may generate JSON inputs for. A max of 128 functions are
- supported.
- """
-
top_logprobs: Optional[int]
"""
An integer between 0 and 20 specifying the number of most likely tokens to
@@ -315,29 +132,8 @@ class CompletionCreateParams(TypedDict, total=False):
user: str
"""
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
-
- web_search_options: WebSearchOptions
- """
- This tool searches the web for relevant results to use in a response. Learn more
- about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
- """
-
-
-class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
- """The contents of the developer message."""
-
- role: Required[Literal["developer"]]
- """The role of the messages author, in this case `developer`."""
-
- name: str
- """An optional name for the participant.
-
- Provides the model information to differentiate between participants of the same
- role.
+ A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
"""
@@ -348,114 +144,22 @@ class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
role: Required[Literal["system"]]
"""The role of the messages author, in this case `system`."""
- name: str
- """An optional name for the participant.
-
- Provides the model information to differentiate between participants of the same
- role.
- """
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL(
- TypedDict, total=False
-):
- url: Required[str]
- """Either a URL of the image or the base64 encoded image data."""
-
- detail: Literal["auto", "low", "high"]
- """Specifies the detail level of the image.
-
- Learn more in the
- [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
- """
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage(
- TypedDict, total=False
-):
- image_url: Required[
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL
- ]
-
- type: Required[Literal["image_url"]]
- """The type of the content part."""
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio(
- TypedDict, total=False
-):
- data: Required[str]
- """Base64 encoded audio data."""
-
- format: Required[Literal["wav", "mp3"]]
- """The format of the encoded audio data. Currently supports "wav" and "mp3"."""
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio(
- TypedDict, total=False
-):
- input_audio: Required[
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio
- ]
-
- type: Required[Literal["input_audio"]]
- """The type of the content part. Always `input_audio`."""
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile(
- TypedDict, total=False
-):
- file_data: str
- """
- The base64 encoded file data, used when passing the file to the model as a
- string.
- """
-
- file_id: str
- """The ID of an uploaded file to use as input."""
-
- filename: str
- """The name of the file, used when passing the file to the model as a string."""
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile(
- TypedDict, total=False
-):
- file: Required[
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile
- ]
-
- type: Required[Literal["file"]]
- """The type of the content part. Always `file`."""
+class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
+ content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
+ """The contents of the developer message."""
-MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[
- RequestMessageContentPartTextParam,
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage,
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio,
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile,
-]
+ role: Required[Literal["developer"]]
+ """The role of the messages author, in this case `developer`."""
class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]]
+ content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
"""The contents of the user message."""
role: Required[Literal["user"]]
"""The role of the messages author, in this case `user`."""
- name: str
- """An optional name for the participant.
-
- Provides the model information to differentiate between participants of the same
- role.
- """
-
-
-class MessageChatCompletionRequestAssistantMessageAudio(TypedDict, total=False):
- id: Required[str]
- """Unique identifier for a previous audio response from the model."""
-
class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal(
TypedDict, total=False
@@ -473,190 +177,20 @@ class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatC
]
-class MessageChatCompletionRequestAssistantMessageFunctionCall(TypedDict, total=False):
- arguments: Required[str]
- """
- The arguments to call the function with, as generated by the model in JSON
- format. Note that the model does not always generate valid JSON, and may
- hallucinate parameters not defined by your function schema. Validate the
- arguments in your code before calling your function.
- """
-
- name: Required[str]
- """The name of the function to call."""
-
-
class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False):
role: Required[Literal["assistant"]]
"""The role of the messages author, in this case `assistant`."""
- audio: Optional[MessageChatCompletionRequestAssistantMessageAudio]
- """Data about a previous audio response from the model.
-
- [Learn more](/docs/guides/audio).
- """
-
content: Union[str, Iterable[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None]
- """The contents of the assistant message.
-
- Required unless `tool_calls` or `function_call` is specified.
- """
-
- function_call: Optional[MessageChatCompletionRequestAssistantMessageFunctionCall]
- """Deprecated and replaced by `tool_calls`.
-
- The name and arguments of a function that should be called, as generated by the
- model.
- """
-
- name: str
- """An optional name for the participant.
-
- Provides the model information to differentiate between participants of the same
- role.
- """
+ """The contents of the assistant message."""
refusal: Optional[str]
"""The refusal message by the assistant."""
- tool_calls: Iterable[MessageToolCallParam]
- """The tool calls generated by the model, such as function calls."""
-
-
-class MessageChatCompletionRequestToolMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
- """The contents of the tool message."""
-
- role: Required[Literal["tool"]]
- """The role of the messages author, in this case `tool`."""
-
- tool_call_id: Required[str]
- """Tool call that this message is responding to."""
-
-
-class MessageChatCompletionRequestFunctionMessage(TypedDict, total=False):
- content: Required[Optional[str]]
- """The contents of the function message."""
-
- name: Required[str]
- """The name of the function to call."""
-
- role: Required[Literal["function"]]
- """The role of the messages author, in this case `function`."""
-
Message: TypeAlias = Union[
- MessageChatCompletionRequestDeveloperMessage,
MessageChatCompletionRequestSystemMessage,
+ MessageChatCompletionRequestDeveloperMessage,
MessageChatCompletionRequestUserMessage,
MessageChatCompletionRequestAssistantMessage,
- MessageChatCompletionRequestToolMessage,
- MessageChatCompletionRequestFunctionMessage,
]
-
-
-class Audio(TypedDict, total=False):
- format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]]
- """Specifies the output audio format.
-
- Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
- """
-
- voice: Required[VoiceIDsSharedParam]
- """The voice the model uses to respond.
-
- Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and
- `shimmer`.
- """
-
-
-class FunctionCallChatCompletionFunctionCallOption(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
-
-FunctionCall: TypeAlias = Union[Literal["none", "auto"], FunctionCallChatCompletionFunctionCallOption]
-
-
-class Function(TypedDict, total=False):
- name: Required[str]
- """The name of the function to be called.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: str
- """
- A description of what the function does, used by the model to choose when and
- how to call the function.
- """
-
- parameters: Dict[str, object]
- """The parameters the functions accepts, described as a JSON Schema object.
-
- See the [guide](/docs/guides/function-calling) for examples, and the
- [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- documentation about the format.
-
- Omitting `parameters` defines a function with an empty parameter list.
- """
-
-
-class Prediction(TypedDict, total=False):
- content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
- """
- The content that should be matched when generating a model response. If
- generated tokens would match this content, the entire model response can be
- returned much more quickly.
- """
-
- type: Required[Literal["content"]]
- """The type of the predicted content you want to provide.
-
- This type is currently always `content`.
- """
-
-
-ResponseFormat: TypeAlias = Union[ResponseFormatTextParam, ResponseFormatJsonSchemaParam, ResponseFormatJsonObjectParam]
-
-
-class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
-
-class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False):
- function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction]
-
- type: Required[Literal["function"]]
- """The type of the tool. Currently, only `function` is supported."""
-
-
-ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice]
-
-
-class Tool(TypedDict, total=False):
- function: Required[FunctionObjectParam]
-
- type: Required[Literal["function"]]
- """The type of the tool. Currently, only `function` is supported."""
-
-
-class WebSearchOptionsUserLocation(TypedDict, total=False):
- approximate: Required[WebSearchLocationParam]
- """Approximate location parameters for the search."""
-
- type: Required[Literal["approximate"]]
- """The type of location approximation. Always `approximate`."""
-
-
-class WebSearchOptions(TypedDict, total=False):
- search_context_size: WebSearchContextSize
- """
- High level guidance for the amount of context window space to use for the
- search. One of `low`, `medium`, or `high`. `medium` is the default.
- """
-
- user_location: Optional[WebSearchOptionsUserLocation]
- """Approximate location parameters for the search."""
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py b/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py
deleted file mode 100644
index 9e456e16..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["CompletionDeleteResponse"]
-
-
-class CompletionDeleteResponse(BaseModel):
- id: str
- """The ID of the chat completion that was deleted."""
-
- deleted: bool
- """Whether the chat completion was deleted."""
-
- object: Literal["chat.completion.deleted"]
- """The type of object being deleted."""
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py
deleted file mode 100644
index 43f4a7cc..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["CompletionListMessagesParams"]
-
-
-class CompletionListMessagesParams(TypedDict, total=False):
- after: str
- """Identifier for the last message from the previous pagination request."""
-
- limit: int
- """Number of messages to retrieve."""
-
- order: Literal["asc", "desc"]
- """Sort order for messages by timestamp.
-
- Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py
deleted file mode 100644
index 57087a63..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .response_message import ResponseMessage
-
-__all__ = ["CompletionListMessagesResponse", "Data"]
-
-
-class Data(ResponseMessage):
- id: str
- """The identifier of the chat message."""
-
-
-class CompletionListMessagesResponse(BaseModel):
- data: List[Data]
- """An array of chat completion message objects."""
-
- first_id: str
- """The identifier of the first chat message in the data array."""
-
- has_more: bool
- """Indicates whether there are more chat messages available."""
-
- last_id: str
- """The identifier of the last chat message in the data array."""
-
- object: Literal["list"]
- """The type of this object. It is always set to "list"."""
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_params.py b/src/digitalocean_genai_sdk/types/chat/completion_list_params.py
deleted file mode 100644
index 8f149e35..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_list_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["CompletionListParams"]
-
-
-class CompletionListParams(TypedDict, total=False):
- after: str
- """Identifier for the last chat completion from the previous pagination request."""
-
- limit: int
- """Number of Chat Completions to retrieve."""
-
- metadata: Optional[Dict[str, str]]
- """A list of metadata keys to filter the Chat Completions by. Example:
-
- `metadata[key1]=value1&metadata[key2]=value2`
- """
-
- model: str
- """The model used to generate the Chat Completions."""
-
- order: Literal["asc", "desc"]
- """Sort order for Chat Completions by timestamp.
-
- Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_response.py b/src/digitalocean_genai_sdk/types/chat/completion_list_response.py
deleted file mode 100644
index 2899f598..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_list_response.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .create_response import CreateResponse
-
-__all__ = ["CompletionListResponse"]
-
-
-class CompletionListResponse(BaseModel):
- data: List[CreateResponse]
- """An array of chat completion objects."""
-
- first_id: str
- """The identifier of the first chat completion in the data array."""
-
- has_more: bool
- """Indicates whether there are more Chat Completions available."""
-
- last_id: str
- """The identifier of the last chat completion in the data array."""
-
- object: Literal["list"]
- """The type of this object. It is always set to "list"."""
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_update_params.py b/src/digitalocean_genai_sdk/types/chat/completion_update_params.py
deleted file mode 100644
index 1f09ecaa..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_update_params.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["CompletionUpdateParams"]
-
-
-class CompletionUpdateParams(TypedDict, total=False):
- metadata: Required[Optional[Dict[str, str]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/create_response.py b/src/digitalocean_genai_sdk/types/chat/create_response.py
index a6320518..c80c56ac 100644
--- a/src/digitalocean_genai_sdk/types/chat/create_response.py
+++ b/src/digitalocean_genai_sdk/types/chat/create_response.py
@@ -20,14 +20,12 @@ class ChoiceLogprobs(BaseModel):
class Choice(BaseModel):
- finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
+ finish_reason: Literal["stop", "length"]
"""The reason the model stopped generating tokens.
This will be `stop` if the model hit a natural stop point or a provided stop
- sequence, `length` if the maximum number of tokens specified in the request was
- reached, `content_filter` if content was omitted due to a flag from our content
- filters, `tool_calls` if the model called a tool, or `function_call`
- (deprecated) if the model called a function.
+ sequence, or `length` if the maximum number of tokens specified in the request
+ was reached.
"""
index: int
@@ -59,15 +57,5 @@ class CreateResponse(BaseModel):
object: Literal["chat.completion"]
"""The object type, which is always `chat.completion`."""
- service_tier: Optional[Literal["scale", "default"]] = None
- """The service tier used for processing the request."""
-
- system_fingerprint: Optional[str] = None
- """This fingerprint represents the backend configuration that the model runs with.
-
- Can be used in conjunction with the `seed` request parameter to understand when
- backend changes have been made that might impact determinism.
- """
-
usage: Optional[Usage] = None
"""Usage statistics for the completion request."""
diff --git a/src/digitalocean_genai_sdk/types/chat/message_tool_call.py b/src/digitalocean_genai_sdk/types/chat/message_tool_call.py
deleted file mode 100644
index abc22e05..00000000
--- a/src/digitalocean_genai_sdk/types/chat/message_tool_call.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["MessageToolCall", "Function"]
-
-
-class Function(BaseModel):
- arguments: str
- """
- The arguments to call the function with, as generated by the model in JSON
- format. Note that the model does not always generate valid JSON, and may
- hallucinate parameters not defined by your function schema. Validate the
- arguments in your code before calling your function.
- """
-
- name: str
- """The name of the function to call."""
-
-
-class MessageToolCall(BaseModel):
- id: str
- """The ID of the tool call."""
-
- function: Function
- """The function that the model called."""
-
- type: Literal["function"]
- """The type of the tool. Currently, only `function` is supported."""
diff --git a/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py b/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py
deleted file mode 100644
index da60f69a..00000000
--- a/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["MessageToolCallParam", "Function"]
-
-
-class Function(TypedDict, total=False):
- arguments: Required[str]
- """
- The arguments to call the function with, as generated by the model in JSON
- format. Note that the model does not always generate valid JSON, and may
- hallucinate parameters not defined by your function schema. Validate the
- arguments in your code before calling your function.
- """
-
- name: Required[str]
- """The name of the function to call."""
-
-
-class MessageToolCallParam(TypedDict, total=False):
- id: Required[str]
- """The ID of the tool call."""
-
- function: Required[Function]
- """The function that the model called."""
-
- type: Required[Literal["function"]]
- """The type of the tool. Currently, only `function` is supported."""
diff --git a/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py b/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py
deleted file mode 100644
index 497ba18c..00000000
--- a/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["ModelIDsSharedParam"]
-
-ModelIDsSharedParam: TypeAlias = Union[
- str,
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- ],
-]
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py
deleted file mode 100644
index 17ca162a..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["ResponseFormatJsonObject"]
-
-
-class ResponseFormatJsonObject(BaseModel):
- type: Literal["json_object"]
- """The type of response format being defined. Always `json_object`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py
deleted file mode 100644
index 5296cec4..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ResponseFormatJsonObjectParam"]
-
-
-class ResponseFormatJsonObjectParam(TypedDict, total=False):
- type: Required[Literal["json_object"]]
- """The type of response format being defined. Always `json_object`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py
deleted file mode 100644
index a65bf052..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from ..._models import BaseModel
-
-__all__ = ["ResponseFormatJsonSchema", "JsonSchema"]
-
-
-class JsonSchema(BaseModel):
- name: str
- """The name of the response format.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: Optional[str] = None
- """
- A description of what the response format is for, used by the model to determine
- how to respond in the format.
- """
-
- schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None)
- """
- The schema for the response format, described as a JSON Schema object. Learn how
- to build JSON schemas [here](https://json-schema.org/).
- """
-
- strict: Optional[bool] = None
- """
- Whether to enable strict schema adherence when generating the output. If set to
- true, the model will always follow the exact schema defined in the `schema`
- field. Only a subset of JSON Schema is supported when `strict` is `true`. To
- learn more, read the
- [Structured Outputs guide](/docs/guides/structured-outputs).
- """
-
-
-class ResponseFormatJsonSchema(BaseModel):
- json_schema: JsonSchema
- """Structured Outputs configuration options, including a JSON Schema."""
-
- type: Literal["json_schema"]
- """The type of response format being defined. Always `json_schema`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py
deleted file mode 100644
index 32d254c3..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ResponseFormatJsonSchemaParam", "JsonSchema"]
-
-
-class JsonSchema(TypedDict, total=False):
- name: Required[str]
- """The name of the response format.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: str
- """
- A description of what the response format is for, used by the model to determine
- how to respond in the format.
- """
-
- schema: Dict[str, object]
- """
- The schema for the response format, described as a JSON Schema object. Learn how
- to build JSON schemas [here](https://json-schema.org/).
- """
-
- strict: Optional[bool]
- """
- Whether to enable strict schema adherence when generating the output. If set to
- true, the model will always follow the exact schema defined in the `schema`
- field. Only a subset of JSON Schema is supported when `strict` is `true`. To
- learn more, read the
- [Structured Outputs guide](/docs/guides/structured-outputs).
- """
-
-
-class ResponseFormatJsonSchemaParam(TypedDict, total=False):
- json_schema: Required[JsonSchema]
- """Structured Outputs configuration options, including a JSON Schema."""
-
- type: Required[Literal["json_schema"]]
- """The type of response format being defined. Always `json_schema`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_text.py b/src/digitalocean_genai_sdk/types/chat/response_format_text.py
deleted file mode 100644
index f0c8cfb7..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_text.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["ResponseFormatText"]
-
-
-class ResponseFormatText(BaseModel):
- type: Literal["text"]
- """The type of response format being defined. Always `text`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py
deleted file mode 100644
index 0d37573e..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ResponseFormatTextParam"]
-
-
-class ResponseFormatTextParam(TypedDict, total=False):
- type: Required[Literal["text"]]
- """The type of response format being defined. Always `text`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_message.py b/src/digitalocean_genai_sdk/types/chat/response_message.py
index 940adf8f..22e81c9b 100644
--- a/src/digitalocean_genai_sdk/types/chat/response_message.py
+++ b/src/digitalocean_genai_sdk/types/chat/response_message.py
@@ -1,67 +1,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
+from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
-from .message_tool_call import MessageToolCall
-__all__ = ["ResponseMessage", "Annotation", "AnnotationURLCitation", "Audio", "FunctionCall"]
-
-
-class AnnotationURLCitation(BaseModel):
- end_index: int
- """The index of the last character of the URL citation in the message."""
-
- start_index: int
- """The index of the first character of the URL citation in the message."""
-
- title: str
- """The title of the web resource."""
-
- url: str
- """The URL of the web resource."""
-
-
-class Annotation(BaseModel):
- type: Literal["url_citation"]
- """The type of the URL citation. Always `url_citation`."""
-
- url_citation: AnnotationURLCitation
- """A URL citation when using web search."""
-
-
-class Audio(BaseModel):
- id: str
- """Unique identifier for this audio response."""
-
- data: str
- """
- Base64 encoded audio bytes generated by the model, in the format specified in
- the request.
- """
-
- expires_at: int
- """
- The Unix timestamp (in seconds) for when this audio response will no longer be
- accessible on the server for use in multi-turn conversations.
- """
-
- transcript: str
- """Transcript of the audio generated by the model."""
-
-
-class FunctionCall(BaseModel):
- arguments: str
- """
- The arguments to call the function with, as generated by the model in JSON
- format. Note that the model does not always generate valid JSON, and may
- hallucinate parameters not defined by your function schema. Validate the
- arguments in your code before calling your function.
- """
-
- name: str
- """The name of the function to call."""
+__all__ = ["ResponseMessage"]
class ResponseMessage(BaseModel):
@@ -73,25 +17,3 @@ class ResponseMessage(BaseModel):
role: Literal["assistant"]
"""The role of the author of this message."""
-
- annotations: Optional[List[Annotation]] = None
- """
- Annotations for the message, when applicable, as when using the
- [web search tool](/docs/guides/tools-web-search?api-mode=chat).
- """
-
- audio: Optional[Audio] = None
- """
- If the audio output modality is requested, this object contains data about the
- audio response from the model. [Learn more](/docs/guides/audio).
- """
-
- function_call: Optional[FunctionCall] = None
- """Deprecated and replaced by `tool_calls`.
-
- The name and arguments of a function that should be called, as generated by the
- model.
- """
-
- tool_calls: Optional[List[MessageToolCall]] = None
- """The tool calls generated by the model, such as function calls."""
diff --git a/src/digitalocean_genai_sdk/types/chat/usage.py b/src/digitalocean_genai_sdk/types/chat/usage.py
index 1a7a1abf..a3785b9f 100644
--- a/src/digitalocean_genai_sdk/types/chat/usage.py
+++ b/src/digitalocean_genai_sdk/types/chat/usage.py
@@ -1,40 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
-
from ..._models import BaseModel
-__all__ = ["Usage", "CompletionTokensDetails", "PromptTokensDetails"]
-
-
-class CompletionTokensDetails(BaseModel):
- accepted_prediction_tokens: Optional[int] = None
- """
- When using Predicted Outputs, the number of tokens in the prediction that
- appeared in the completion.
- """
-
- audio_tokens: Optional[int] = None
- """Audio input tokens generated by the model."""
-
- reasoning_tokens: Optional[int] = None
- """Tokens generated by the model for reasoning."""
-
- rejected_prediction_tokens: Optional[int] = None
- """
- When using Predicted Outputs, the number of tokens in the prediction that did
- not appear in the completion. However, like reasoning tokens, these tokens are
- still counted in the total completion tokens for purposes of billing, output,
- and context window limits.
- """
-
-
-class PromptTokensDetails(BaseModel):
- audio_tokens: Optional[int] = None
- """Audio input tokens present in the prompt."""
-
- cached_tokens: Optional[int] = None
- """Cached tokens present in the prompt."""
+__all__ = ["Usage"]
class Usage(BaseModel):
@@ -46,9 +14,3 @@ class Usage(BaseModel):
total_tokens: int
"""Total number of tokens used in the request (prompt + completion)."""
-
- completion_tokens_details: Optional[CompletionTokensDetails] = None
- """Breakdown of tokens used in a completion."""
-
- prompt_tokens_details: Optional[PromptTokensDetails] = None
- """Breakdown of tokens used in the prompt."""
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py b/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py
deleted file mode 100644
index 18b284a9..00000000
--- a/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["WebSearchContextSize"]
-
-WebSearchContextSize: TypeAlias = Literal["low", "medium", "high"]
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_location.py b/src/digitalocean_genai_sdk/types/chat/web_search_location.py
deleted file mode 100644
index 192c4efa..00000000
--- a/src/digitalocean_genai_sdk/types/chat/web_search_location.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ..._models import BaseModel
-
-__all__ = ["WebSearchLocation"]
-
-
-class WebSearchLocation(BaseModel):
- city: Optional[str] = None
- """Free text input for the city of the user, e.g. `San Francisco`."""
-
- country: Optional[str] = None
- """
- The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
- the user, e.g. `US`.
- """
-
- region: Optional[str] = None
- """Free text input for the region of the user, e.g. `California`."""
-
- timezone: Optional[str] = None
- """
- The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
- user, e.g. `America/Los_Angeles`.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py b/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py
deleted file mode 100644
index bc4d5a4c..00000000
--- a/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["WebSearchLocationParam"]
-
-
-class WebSearchLocationParam(TypedDict, total=False):
- city: str
- """Free text input for the city of the user, e.g. `San Francisco`."""
-
- country: str
- """
- The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
- the user, e.g. `US`.
- """
-
- region: str
- """Free text input for the region of the user, e.g. `California`."""
-
- timezone: str
- """
- The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
- user, e.g. `America/Los_Angeles`.
- """
diff --git a/src/digitalocean_genai_sdk/types/comparison_filter.py b/src/digitalocean_genai_sdk/types/comparison_filter.py
deleted file mode 100644
index 547aac28..00000000
--- a/src/digitalocean_genai_sdk/types/comparison_filter.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ComparisonFilter"]
-
-
-class ComparisonFilter(BaseModel):
- key: str
- """The key to compare against the value."""
-
- type: Literal["eq", "ne", "gt", "gte", "lt", "lte"]
- """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
-
- - `eq`: equals
- - `ne`: not equal
- - `gt`: greater than
- - `gte`: greater than or equal
- - `lt`: less than
- - `lte`: less than or equal
- """
-
- value: Union[str, float, bool]
- """
- The value to compare against the attribute key; supports string, number, or
- boolean types.
- """
diff --git a/src/digitalocean_genai_sdk/types/comparison_filter_param.py b/src/digitalocean_genai_sdk/types/comparison_filter_param.py
deleted file mode 100644
index 2df2d744..00000000
--- a/src/digitalocean_genai_sdk/types/comparison_filter_param.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ComparisonFilterParam"]
-
-
-class ComparisonFilterParam(TypedDict, total=False):
- key: Required[str]
- """The key to compare against the value."""
-
- type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
- """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
-
- - `eq`: equals
- - `ne`: not equal
- - `gt`: greater than
- - `gte`: greater than or equal
- - `lt`: less than
- - `lte`: less than or equal
- """
-
- value: Required[Union[str, float, bool]]
- """
- The value to compare against the attribute key; supports string, number, or
- boolean types.
- """
diff --git a/src/digitalocean_genai_sdk/types/completion_create_params.py b/src/digitalocean_genai_sdk/types/completion_create_params.py
deleted file mode 100644
index 36709c57..00000000
--- a/src/digitalocean_genai_sdk/types/completion_create_params.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .stop_configuration_param import StopConfigurationParam
-from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
-
-__all__ = ["CompletionCreateParams"]
-
-
-class CompletionCreateParams(TypedDict, total=False):
- model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]]
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]]
- """
- The prompt(s) to generate completions for, encoded as a string, array of
- strings, array of tokens, or array of token arrays.
-
- Note that <|endoftext|> is the document separator that the model sees during
- training, so if a prompt is not specified the model will generate as if from the
- beginning of a new document.
- """
-
- best_of: Optional[int]
- """
- Generates `best_of` completions server-side and returns the "best" (the one with
- the highest log probability per token). Results cannot be streamed.
-
- When used with `n`, `best_of` controls the number of candidate completions and
- `n` specifies how many to return – `best_of` must be greater than `n`.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
- """
-
- echo: Optional[bool]
- """Echo back the prompt in addition to the completion"""
-
- frequency_penalty: Optional[float]
- """Number between -2.0 and 2.0.
-
- Positive values penalize new tokens based on their existing frequency in the
- text so far, decreasing the model's likelihood to repeat the same line verbatim.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
- """
-
- logit_bias: Optional[Dict[str, int]]
- """Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the GPT
- tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
- Mathematically, the bias is added to the logits generated by the model prior to
- sampling. The exact effect will vary per model, but values between -1 and 1
- should decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
- from being generated.
- """
-
- logprobs: Optional[int]
- """
- Include the log probabilities on the `logprobs` most likely output tokens, as
- well the chosen tokens. For example, if `logprobs` is 5, the API will return a
- list of the 5 most likely tokens. The API will always return the `logprob` of
- the sampled token, so there may be up to `logprobs+1` elements in the response.
-
- The maximum value for `logprobs` is 5.
- """
-
- max_tokens: Optional[int]
- """
- The maximum number of [tokens](/tokenizer) that can be generated in the
- completion.
-
- The token count of your prompt plus `max_tokens` cannot exceed the model's
- context length.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens.
- """
-
- n: Optional[int]
- """How many completions to generate for each prompt.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
- """
-
- presence_penalty: Optional[float]
- """Number between -2.0 and 2.0.
-
- Positive values penalize new tokens based on whether they appear in the text so
- far, increasing the model's likelihood to talk about new topics.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
- """
-
- seed: Optional[int]
- """
- If specified, our system will make a best effort to sample deterministically,
- such that repeated requests with the same `seed` and parameters should return
- the same result.
-
- Determinism is not guaranteed, and you should refer to the `system_fingerprint`
- response parameter to monitor changes in the backend.
- """
-
- stop: Optional[StopConfigurationParam]
- """Up to 4 sequences where the API will stop generating further tokens.
-
- The returned text will not contain the stop sequence.
- """
-
- stream: Optional[bool]
- """Whether to stream back partial progress.
-
- If set, tokens will be sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available, with the stream terminated by a `data: [DONE]`
- message.
- [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
- """
-
- stream_options: Optional[ChatCompletionStreamOptionsParam]
- """Options for streaming response. Only set this when you set `stream: true`."""
-
- suffix: Optional[str]
- """The suffix that comes after a completion of inserted text.
-
- This parameter is only supported for `gpt-3.5-turbo-instruct`.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/completion_create_response.py b/src/digitalocean_genai_sdk/types/completion_create_response.py
deleted file mode 100644
index 2e1028bf..00000000
--- a/src/digitalocean_genai_sdk/types/completion_create_response.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .chat.usage import Usage
-
-__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs"]
-
-
-class ChoiceLogprobs(BaseModel):
- text_offset: Optional[List[int]] = None
-
- token_logprobs: Optional[List[float]] = None
-
- tokens: Optional[List[str]] = None
-
- top_logprobs: Optional[List[Dict[str, float]]] = None
-
-
-class Choice(BaseModel):
- finish_reason: Literal["stop", "length", "content_filter"]
- """The reason the model stopped generating tokens.
-
- This will be `stop` if the model hit a natural stop point or a provided stop
- sequence, `length` if the maximum number of tokens specified in the request was
- reached, or `content_filter` if content was omitted due to a flag from our
- content filters.
- """
-
- index: int
-
- logprobs: Optional[ChoiceLogprobs] = None
-
- text: str
-
-
-class CompletionCreateResponse(BaseModel):
- id: str
- """A unique identifier for the completion."""
-
- choices: List[Choice]
- """The list of completion choices the model generated for the input prompt."""
-
- created: int
- """The Unix timestamp (in seconds) of when the completion was created."""
-
- model: str
- """The model used for completion."""
-
- object: Literal["text_completion"]
- """The object type, which is always "text_completion" """
-
- system_fingerprint: Optional[str] = None
- """This fingerprint represents the backend configuration that the model runs with.
-
- Can be used in conjunction with the `seed` request parameter to understand when
- backend changes have been made that might impact determinism.
- """
-
- usage: Optional[Usage] = None
- """Usage statistics for the completion request."""
diff --git a/src/digitalocean_genai_sdk/types/compound_filter.py b/src/digitalocean_genai_sdk/types/compound_filter.py
deleted file mode 100644
index bf1f793f..00000000
--- a/src/digitalocean_genai_sdk/types/compound_filter.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-from .comparison_filter import ComparisonFilter
-
-__all__ = ["CompoundFilter", "Filter"]
-
-Filter: TypeAlias = Union[ComparisonFilter, Dict[str, object]]
-
-
-class CompoundFilter(BaseModel):
- filters: List[Filter]
- """Array of filters to combine.
-
- Items can be `ComparisonFilter` or `CompoundFilter`.
- """
-
- type: Literal["and", "or"]
- """Type of operation: `and` or `or`."""
diff --git a/src/digitalocean_genai_sdk/types/compound_filter_param.py b/src/digitalocean_genai_sdk/types/compound_filter_param.py
deleted file mode 100644
index 1f66a965..00000000
--- a/src/digitalocean_genai_sdk/types/compound_filter_param.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .comparison_filter_param import ComparisonFilterParam
-
-__all__ = ["CompoundFilterParam", "Filter"]
-
-Filter: TypeAlias = Union[ComparisonFilterParam, Dict[str, object]]
-
-
-class CompoundFilterParam(TypedDict, total=False):
- filters: Required[Iterable[Filter]]
- """Array of filters to combine.
-
- Items can be `ComparisonFilter` or `CompoundFilter`.
- """
-
- type: Required[Literal["and", "or"]]
- """Type of operation: `and` or `or`."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call.py b/src/digitalocean_genai_sdk/types/computer_tool_call.py
deleted file mode 100644
index b127e694..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck
-
-__all__ = [
- "ComputerToolCall",
- "Action",
- "ActionClick",
- "ActionDoubleClick",
- "ActionDrag",
- "ActionDragPath",
- "ActionKeyPress",
- "ActionMove",
- "ActionScreenshot",
- "ActionScroll",
- "ActionType",
- "ActionWait",
-]
-
-
-class ActionClick(BaseModel):
- button: Literal["left", "right", "wheel", "back", "forward"]
- """Indicates which mouse button was pressed during the click.
-
- One of `left`, `right`, `wheel`, `back`, or `forward`.
- """
-
- type: Literal["click"]
- """Specifies the event type.
-
- For a click action, this property is always set to `click`.
- """
-
- x: int
- """The x-coordinate where the click occurred."""
-
- y: int
- """The y-coordinate where the click occurred."""
-
-
-class ActionDoubleClick(BaseModel):
- type: Literal["double_click"]
- """Specifies the event type.
-
- For a double click action, this property is always set to `double_click`.
- """
-
- x: int
- """The x-coordinate where the double click occurred."""
-
- y: int
- """The y-coordinate where the double click occurred."""
-
-
-class ActionDragPath(BaseModel):
- x: int
- """The x-coordinate."""
-
- y: int
- """The y-coordinate."""
-
-
-class ActionDrag(BaseModel):
- path: List[ActionDragPath]
- """An array of coordinates representing the path of the drag action.
-
- Coordinates will appear as an array of objects, eg
-
- ```
- [
-
- { x: 100, y: 200 },
- { x: 200, y: 300 }
- ]
- ```
- """
-
- type: Literal["drag"]
- """Specifies the event type.
-
- For a drag action, this property is always set to `drag`.
- """
-
-
-class ActionKeyPress(BaseModel):
- keys: List[str]
- """The combination of keys the model is requesting to be pressed.
-
- This is an array of strings, each representing a key.
- """
-
- type: Literal["keypress"]
- """Specifies the event type.
-
- For a keypress action, this property is always set to `keypress`.
- """
-
-
-class ActionMove(BaseModel):
- type: Literal["move"]
- """Specifies the event type.
-
- For a move action, this property is always set to `move`.
- """
-
- x: int
- """The x-coordinate to move to."""
-
- y: int
- """The y-coordinate to move to."""
-
-
-class ActionScreenshot(BaseModel):
- type: Literal["screenshot"]
- """Specifies the event type.
-
- For a screenshot action, this property is always set to `screenshot`.
- """
-
-
-class ActionScroll(BaseModel):
- scroll_x: int
- """The horizontal scroll distance."""
-
- scroll_y: int
- """The vertical scroll distance."""
-
- type: Literal["scroll"]
- """Specifies the event type.
-
- For a scroll action, this property is always set to `scroll`.
- """
-
- x: int
- """The x-coordinate where the scroll occurred."""
-
- y: int
- """The y-coordinate where the scroll occurred."""
-
-
-class ActionType(BaseModel):
- text: str
- """The text to type."""
-
- type: Literal["type"]
- """Specifies the event type.
-
- For a type action, this property is always set to `type`.
- """
-
-
-class ActionWait(BaseModel):
- type: Literal["wait"]
- """Specifies the event type.
-
- For a wait action, this property is always set to `wait`.
- """
-
-
-Action: TypeAlias = Union[
- ActionClick,
- ActionDoubleClick,
- ActionDrag,
- ActionKeyPress,
- ActionMove,
- ActionScreenshot,
- ActionScroll,
- ActionType,
- ActionWait,
-]
-
-
-class ComputerToolCall(BaseModel):
- id: str
- """The unique ID of the computer call."""
-
- action: Action
- """A click action."""
-
- call_id: str
- """An identifier used when responding to the tool call with output."""
-
- pending_safety_checks: List[ComputerToolCallSafetyCheck]
- """The pending safety checks for the computer call."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
-
- type: Literal["computer_call"]
- """The type of the computer call. Always `computer_call`."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_output.py b/src/digitalocean_genai_sdk/types/computer_tool_call_output.py
deleted file mode 100644
index 0133a29a..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_output.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck
-
-__all__ = ["ComputerToolCallOutput", "Output"]
-
-
-class Output(BaseModel):
- type: Literal["computer_screenshot"]
- """Specifies the event type.
-
- For a computer screenshot, this property is always set to `computer_screenshot`.
- """
-
- file_id: Optional[str] = None
- """The identifier of an uploaded file that contains the screenshot."""
-
- image_url: Optional[str] = None
- """The URL of the screenshot image."""
-
-
-class ComputerToolCallOutput(BaseModel):
- call_id: str
- """The ID of the computer tool call that produced the output."""
-
- output: Output
- """A computer screenshot image used with the computer use tool."""
-
- type: Literal["computer_call_output"]
- """The type of the computer tool call output. Always `computer_call_output`."""
-
- id: Optional[str] = None
- """The ID of the computer tool call output."""
-
- acknowledged_safety_checks: Optional[List[ComputerToolCallSafetyCheck]] = None
- """
- The safety checks reported by the API that have been acknowledged by the
- developer.
- """
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py
deleted file mode 100644
index 764c4da8..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam
-
-__all__ = ["ComputerToolCallOutputParam", "Output"]
-
-
-class Output(TypedDict, total=False):
- type: Required[Literal["computer_screenshot"]]
- """Specifies the event type.
-
- For a computer screenshot, this property is always set to `computer_screenshot`.
- """
-
- file_id: str
- """The identifier of an uploaded file that contains the screenshot."""
-
- image_url: str
- """The URL of the screenshot image."""
-
-
-class ComputerToolCallOutputParam(TypedDict, total=False):
- call_id: Required[str]
- """The ID of the computer tool call that produced the output."""
-
- output: Required[Output]
- """A computer screenshot image used with the computer use tool."""
-
- type: Required[Literal["computer_call_output"]]
- """The type of the computer tool call output. Always `computer_call_output`."""
-
- id: str
- """The ID of the computer tool call output."""
-
- acknowledged_safety_checks: Iterable[ComputerToolCallSafetyCheckParam]
- """
- The safety checks reported by the API that have been acknowledged by the
- developer.
- """
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_param.py
deleted file mode 100644
index 7fb87bfa..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_param.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam
-
-__all__ = [
- "ComputerToolCallParam",
- "Action",
- "ActionClick",
- "ActionDoubleClick",
- "ActionDrag",
- "ActionDragPath",
- "ActionKeyPress",
- "ActionMove",
- "ActionScreenshot",
- "ActionScroll",
- "ActionType",
- "ActionWait",
-]
-
-
-class ActionClick(TypedDict, total=False):
- button: Required[Literal["left", "right", "wheel", "back", "forward"]]
- """Indicates which mouse button was pressed during the click.
-
- One of `left`, `right`, `wheel`, `back`, or `forward`.
- """
-
- type: Required[Literal["click"]]
- """Specifies the event type.
-
- For a click action, this property is always set to `click`.
- """
-
- x: Required[int]
- """The x-coordinate where the click occurred."""
-
- y: Required[int]
- """The y-coordinate where the click occurred."""
-
-
-class ActionDoubleClick(TypedDict, total=False):
- type: Required[Literal["double_click"]]
- """Specifies the event type.
-
- For a double click action, this property is always set to `double_click`.
- """
-
- x: Required[int]
- """The x-coordinate where the double click occurred."""
-
- y: Required[int]
- """The y-coordinate where the double click occurred."""
-
-
-class ActionDragPath(TypedDict, total=False):
- x: Required[int]
- """The x-coordinate."""
-
- y: Required[int]
- """The y-coordinate."""
-
-
-class ActionDrag(TypedDict, total=False):
- path: Required[Iterable[ActionDragPath]]
- """An array of coordinates representing the path of the drag action.
-
- Coordinates will appear as an array of objects, eg
-
- ```
- [
-
- { x: 100, y: 200 },
- { x: 200, y: 300 }
- ]
- ```
- """
-
- type: Required[Literal["drag"]]
- """Specifies the event type.
-
- For a drag action, this property is always set to `drag`.
- """
-
-
-class ActionKeyPress(TypedDict, total=False):
- keys: Required[List[str]]
- """The combination of keys the model is requesting to be pressed.
-
- This is an array of strings, each representing a key.
- """
-
- type: Required[Literal["keypress"]]
- """Specifies the event type.
-
- For a keypress action, this property is always set to `keypress`.
- """
-
-
-class ActionMove(TypedDict, total=False):
- type: Required[Literal["move"]]
- """Specifies the event type.
-
- For a move action, this property is always set to `move`.
- """
-
- x: Required[int]
- """The x-coordinate to move to."""
-
- y: Required[int]
- """The y-coordinate to move to."""
-
-
-class ActionScreenshot(TypedDict, total=False):
- type: Required[Literal["screenshot"]]
- """Specifies the event type.
-
- For a screenshot action, this property is always set to `screenshot`.
- """
-
-
-class ActionScroll(TypedDict, total=False):
- scroll_x: Required[int]
- """The horizontal scroll distance."""
-
- scroll_y: Required[int]
- """The vertical scroll distance."""
-
- type: Required[Literal["scroll"]]
- """Specifies the event type.
-
- For a scroll action, this property is always set to `scroll`.
- """
-
- x: Required[int]
- """The x-coordinate where the scroll occurred."""
-
- y: Required[int]
- """The y-coordinate where the scroll occurred."""
-
-
-class ActionType(TypedDict, total=False):
- text: Required[str]
- """The text to type."""
-
- type: Required[Literal["type"]]
- """Specifies the event type.
-
- For a type action, this property is always set to `type`.
- """
-
-
-class ActionWait(TypedDict, total=False):
- type: Required[Literal["wait"]]
- """Specifies the event type.
-
- For a wait action, this property is always set to `wait`.
- """
-
-
-Action: TypeAlias = Union[
- ActionClick,
- ActionDoubleClick,
- ActionDrag,
- ActionKeyPress,
- ActionMove,
- ActionScreenshot,
- ActionScroll,
- ActionType,
- ActionWait,
-]
-
-
-class ComputerToolCallParam(TypedDict, total=False):
- id: Required[str]
- """The unique ID of the computer call."""
-
- action: Required[Action]
- """A click action."""
-
- call_id: Required[str]
- """An identifier used when responding to the tool call with output."""
-
- pending_safety_checks: Required[Iterable[ComputerToolCallSafetyCheckParam]]
- """The pending safety checks for the computer call."""
-
- status: Required[Literal["in_progress", "completed", "incomplete"]]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
-
- type: Required[Literal["computer_call"]]
- """The type of the computer call. Always `computer_call`."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py
deleted file mode 100644
index e24b9f35..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["ComputerToolCallSafetyCheck"]
-
-
-class ComputerToolCallSafetyCheck(BaseModel):
- id: str
- """The ID of the pending safety check."""
-
- code: str
- """The type of the pending safety check."""
-
- message: str
- """Details about the pending safety check."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py
deleted file mode 100644
index 859d6b59..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ComputerToolCallSafetyCheckParam"]
-
-
-class ComputerToolCallSafetyCheckParam(TypedDict, total=False):
- id: Required[str]
- """The ID of the pending safety check."""
-
- code: Required[str]
- """The type of the pending safety check."""
-
- message: Required[str]
- """Details about the pending safety check."""
diff --git a/src/digitalocean_genai_sdk/types/create_thread_request_param.py b/src/digitalocean_genai_sdk/types/create_thread_request_param.py
deleted file mode 100644
index 3a8f59b4..00000000
--- a/src/digitalocean_genai_sdk/types/create_thread_request_param.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .threads.create_message_request_param import CreateMessageRequestParam
-
-__all__ = [
- "CreateThreadRequestParam",
- "ToolResources",
- "ToolResourcesCodeInterpreter",
- "ToolResourcesFileSearch",
- "ToolResourcesFileSearchVectorStore",
- "ToolResourcesFileSearchVectorStoreChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic",
-]
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False):
- type: Required[Literal["auto"]]
- """Always `auto`."""
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False):
- chunk_overlap_tokens: Required[int]
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: Required[int]
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False):
- static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic]
-
- type: Required[Literal["static"]]
- """Always `static`."""
-
-
-ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
- ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy,
- ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy,
-]
-
-
-class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
- chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
-
- file_ids: List[str]
- """A list of [file](/docs/api-reference/files) IDs to add to the vector store.
-
- There can be a maximum of 10000 files in a vector store.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- thread. There can be a maximum of 1 vector store attached to the thread.
- """
-
- vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
- """
- A helper to create a [vector store](/docs/api-reference/vector-stores/object)
- with file_ids and attach it to this thread. There can be a maximum of 1 vector
- store attached to the thread.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
-
-
-class CreateThreadRequestParam(TypedDict, total=False):
- messages: Iterable[CreateMessageRequestParam]
- """A list of [messages](/docs/api-reference/messages) to start the thread with."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- tool_resources: Optional[ToolResources]
- """
- A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
diff --git a/src/digitalocean_genai_sdk/types/embedding_create_params.py b/src/digitalocean_genai_sdk/types/embedding_create_params.py
index caf65415..d3e923ad 100644
--- a/src/digitalocean_genai_sdk/types/embedding_create_params.py
+++ b/src/digitalocean_genai_sdk/types/embedding_create_params.py
@@ -2,47 +2,27 @@
from __future__ import annotations
-from typing import List, Union, Iterable
-from typing_extensions import Literal, Required, TypedDict
+from typing import List, Union
+from typing_extensions import Required, TypedDict
__all__ = ["EmbeddingCreateParams"]
class EmbeddingCreateParams(TypedDict, total=False):
- input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]]
+ input: Required[Union[str, List[str]]]
"""Input text to embed, encoded as a string or array of tokens.
- To embed multiple inputs in a single request, pass an array of strings or array
- of token arrays. The input must not exceed the max input tokens for the model
- (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any
- array must be 2048 dimensions or less.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens. Some models may also impose a limit on total number of
- tokens summed across inputs.
+ To embed multiple inputs in a single request, pass an array of strings.
"""
- model: Required[Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]]]
+ model: Required[str]
"""ID of the model to use.
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- dimensions: int
- """The number of dimensions the resulting output embeddings should have.
-
- Only supported in `text-embedding-3` and later models.
- """
-
- encoding_format: Literal["float", "base64"]
- """The format to return the embeddings in.
-
- Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
+ You can use the List models API to see all of your available models.
"""
user: str
"""
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
+ A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
"""
diff --git a/src/digitalocean_genai_sdk/types/embedding_create_response.py b/src/digitalocean_genai_sdk/types/embedding_create_response.py
index e85daaba..19c474fd 100644
--- a/src/digitalocean_genai_sdk/types/embedding_create_response.py
+++ b/src/digitalocean_genai_sdk/types/embedding_create_response.py
@@ -10,11 +10,7 @@
class Data(BaseModel):
embedding: List[float]
- """The embedding vector, which is a list of floats.
-
- The length of vector depends on the model as listed in the
- [embedding guide](/docs/guides/embeddings).
- """
+ """The embedding vector, which is a list of floats."""
index: int
"""The index of the embedding in the list of embeddings."""
diff --git a/src/digitalocean_genai_sdk/types/file_delete_response.py b/src/digitalocean_genai_sdk/types/file_delete_response.py
deleted file mode 100644
index 26e2e053..00000000
--- a/src/digitalocean_genai_sdk/types/file_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FileDeleteResponse"]
-
-
-class FileDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["file"]
diff --git a/src/digitalocean_genai_sdk/types/file_list_params.py b/src/digitalocean_genai_sdk/types/file_list_params.py
deleted file mode 100644
index 058d874c..00000000
--- a/src/digitalocean_genai_sdk/types/file_list_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["FileListParams"]
-
-
-class FileListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 10,000, and the default is 10,000.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
-
- purpose: str
- """Only return files with the given purpose."""
diff --git a/src/digitalocean_genai_sdk/types/file_list_response.py b/src/digitalocean_genai_sdk/types/file_list_response.py
deleted file mode 100644
index db9ef641..00000000
--- a/src/digitalocean_genai_sdk/types/file_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .openai_file import OpenAIFile
-
-__all__ = ["FileListResponse"]
-
-
-class FileListResponse(BaseModel):
- data: List[OpenAIFile]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py b/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py
deleted file mode 100644
index 20c945db..00000000
--- a/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import TypeAlias
-
-__all__ = ["FileRetrieveContentResponse"]
-
-FileRetrieveContentResponse: TypeAlias = str
diff --git a/src/digitalocean_genai_sdk/types/file_search_ranker.py b/src/digitalocean_genai_sdk/types/file_search_ranker.py
deleted file mode 100644
index d4aabe5a..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_ranker.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["FileSearchRanker"]
-
-FileSearchRanker: TypeAlias = Literal["auto", "default_2024_08_21"]
diff --git a/src/digitalocean_genai_sdk/types/file_search_tool_call.py b/src/digitalocean_genai_sdk/types/file_search_tool_call.py
deleted file mode 100644
index 04542379..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_tool_call.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FileSearchToolCall", "Result"]
-
-
-class Result(BaseModel):
- attributes: Optional[Dict[str, Union[str, float, bool]]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- file_id: Optional[str] = None
- """The unique ID of the file."""
-
- filename: Optional[str] = None
- """The name of the file."""
-
- score: Optional[float] = None
- """The relevance score of the file - a value between 0 and 1."""
-
- text: Optional[str] = None
- """The text that was retrieved from the file."""
-
-
-class FileSearchToolCall(BaseModel):
- id: str
- """The unique ID of the file search tool call."""
-
- queries: List[str]
- """The queries used to search for files."""
-
- status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
- """The status of the file search tool call.
-
- One of `in_progress`, `searching`, `incomplete` or `failed`,
- """
-
- type: Literal["file_search_call"]
- """The type of the file search tool call. Always `file_search_call`."""
-
- results: Optional[List[Result]] = None
- """The results of the file search tool call."""
diff --git a/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py b/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py
deleted file mode 100644
index 315dc90e..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FileSearchToolCallParam", "Result"]
-
-
-class Result(TypedDict, total=False):
- attributes: Optional[Dict[str, Union[str, float, bool]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- file_id: str
- """The unique ID of the file."""
-
- filename: str
- """The name of the file."""
-
- score: float
- """The relevance score of the file - a value between 0 and 1."""
-
- text: str
- """The text that was retrieved from the file."""
-
-
-class FileSearchToolCallParam(TypedDict, total=False):
- id: Required[str]
- """The unique ID of the file search tool call."""
-
- queries: Required[List[str]]
- """The queries used to search for files."""
-
- status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
- """The status of the file search tool call.
-
- One of `in_progress`, `searching`, `incomplete` or `failed`,
- """
-
- type: Required[Literal["file_search_call"]]
- """The type of the file search tool call. Always `file_search_call`."""
-
- results: Optional[Iterable[Result]]
- """The results of the file search tool call."""
diff --git a/src/digitalocean_genai_sdk/types/file_upload_params.py b/src/digitalocean_genai_sdk/types/file_upload_params.py
deleted file mode 100644
index 5b42fc50..00000000
--- a/src/digitalocean_genai_sdk/types/file_upload_params.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["FileUploadParams"]
-
-
-class FileUploadParams(TypedDict, total=False):
- file: Required[FileTypes]
- """The File object (not file name) to be uploaded."""
-
- purpose: Required[Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"]]
- """The intended purpose of the uploaded file.
-
- One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch
- API - `fine-tune`: Used for fine-tuning - `vision`: Images used for vision
- fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used
- for eval data sets
- """
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py
index 6b7dcea7..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py
+++ b/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py
@@ -1,10 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
-
-from .fine_tuning_job import FineTuningJob as FineTuningJob
-from .job_list_params import JobListParams as JobListParams
-from .fine_tune_method import FineTuneMethod as FineTuneMethod
-from .job_create_params import JobCreateParams as JobCreateParams
-from .job_list_response import JobListResponse as JobListResponse
-from .fine_tune_method_param import FineTuneMethodParam as FineTuneMethodParam
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py
index 6b30e048..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py
+++ b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py
@@ -1,10 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
-
-from .permission_create_params import PermissionCreateParams as PermissionCreateParams
-from .permission_delete_response import PermissionDeleteResponse as PermissionDeleteResponse
-from .permission_retrieve_params import PermissionRetrieveParams as PermissionRetrieveParams
-from .list_fine_tuning_checkpoint_permission import (
- ListFineTuningCheckpointPermission as ListFineTuningCheckpointPermission,
-)
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py
deleted file mode 100644
index 9136bf5d..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ListFineTuningCheckpointPermission", "Data"]
-
-
-class Data(BaseModel):
- id: str
- """The permission identifier, which can be referenced in the API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the permission was created."""
-
- object: Literal["checkpoint.permission"]
- """The object type, which is always "checkpoint.permission"."""
-
- project_id: str
- """The project identifier that the permission is for."""
-
-
-class ListFineTuningCheckpointPermission(BaseModel):
- data: List[Data]
-
- has_more: bool
-
- object: Literal["list"]
-
- first_id: Optional[str] = None
-
- last_id: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py
deleted file mode 100644
index 92f98f21..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Required, TypedDict
-
-__all__ = ["PermissionCreateParams"]
-
-
-class PermissionCreateParams(TypedDict, total=False):
- project_ids: Required[List[str]]
- """The project identifiers to grant access to."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py
deleted file mode 100644
index 1a92d912..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["PermissionDeleteResponse"]
-
-
-class PermissionDeleteResponse(BaseModel):
- id: str
- """The ID of the fine-tuned model checkpoint permission that was deleted."""
-
- deleted: bool
- """Whether the fine-tuned model checkpoint permission was successfully deleted."""
-
- object: Literal["checkpoint.permission"]
- """The object type, which is always "checkpoint.permission"."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py
deleted file mode 100644
index 6e66a867..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["PermissionRetrieveParams"]
-
-
-class PermissionRetrieveParams(TypedDict, total=False):
- after: str
- """Identifier for the last permission ID from the previous pagination request."""
-
- limit: int
- """Number of permissions to retrieve."""
-
- order: Literal["ascending", "descending"]
- """The order in which to retrieve permissions."""
-
- project_id: str
- """The ID of the project to get permissions for."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py
deleted file mode 100644
index 6ad8f7a5..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["FineTuneMethod", "Dpo", "DpoHyperparameters", "Supervised", "SupervisedHyperparameters"]
-
-
-class DpoHyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, None] = None
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- beta: Union[Literal["auto"], float, None] = None
- """The beta value for the DPO method.
-
- A higher beta value will increase the weight of the penalty between the policy
- and reference model.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float, None] = None
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int, None] = None
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class Dpo(BaseModel):
- hyperparameters: Optional[DpoHyperparameters] = None
- """The hyperparameters used for the fine-tuning job."""
-
-
-class SupervisedHyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, None] = None
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float, None] = None
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int, None] = None
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class Supervised(BaseModel):
- hyperparameters: Optional[SupervisedHyperparameters] = None
- """The hyperparameters used for the fine-tuning job."""
-
-
-class FineTuneMethod(BaseModel):
- dpo: Optional[Dpo] = None
- """Configuration for the DPO fine-tuning method."""
-
- supervised: Optional[Supervised] = None
- """Configuration for the supervised fine-tuning method."""
-
- type: Optional[Literal["supervised", "dpo"]] = None
- """The type of method. Is either `supervised` or `dpo`."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py
deleted file mode 100644
index e28abc93..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["FineTuneMethodParam", "Dpo", "DpoHyperparameters", "Supervised", "SupervisedHyperparameters"]
-
-
-class DpoHyperparameters(TypedDict, total=False):
- batch_size: Union[Literal["auto"], int]
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- beta: Union[Literal["auto"], float]
- """The beta value for the DPO method.
-
- A higher beta value will increase the weight of the penalty between the policy
- and reference model.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float]
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class Dpo(TypedDict, total=False):
- hyperparameters: DpoHyperparameters
- """The hyperparameters used for the fine-tuning job."""
-
-
-class SupervisedHyperparameters(TypedDict, total=False):
- batch_size: Union[Literal["auto"], int]
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float]
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class Supervised(TypedDict, total=False):
- hyperparameters: SupervisedHyperparameters
- """The hyperparameters used for the fine-tuning job."""
-
-
-class FineTuneMethodParam(TypedDict, total=False):
- dpo: Dpo
- """Configuration for the DPO fine-tuning method."""
-
- supervised: Supervised
- """Configuration for the supervised fine-tuning method."""
-
- type: Literal["supervised", "dpo"]
- """The type of method. Is either `supervised` or `dpo`."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py
deleted file mode 100644
index 29f387a1..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .fine_tune_method import FineTuneMethod
-
-__all__ = ["FineTuningJob", "Error", "Hyperparameters", "Integration", "IntegrationWandb"]
-
-
-class Error(BaseModel):
- code: str
- """A machine-readable error code."""
-
- message: str
- """A human-readable error message."""
-
- param: Optional[str] = None
- """The parameter that was invalid, usually `training_file` or `validation_file`.
-
- This field will be null if the failure was not parameter-specific.
- """
-
-
-class Hyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, None] = None
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float, None] = None
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int, None] = None
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class IntegrationWandb(BaseModel):
- project: str
- """The name of the project that the new run will be created under."""
-
- entity: Optional[str] = None
- """The entity to use for the run.
-
- This allows you to set the team or username of the WandB user that you would
- like associated with the run. If not set, the default entity for the registered
- WandB API key is used.
- """
-
- name: Optional[str] = None
- """A display name to set for the run.
-
- If not set, we will use the Job ID as the name.
- """
-
- tags: Optional[List[str]] = None
- """A list of tags to be attached to the newly created run.
-
- These tags are passed through directly to WandB. Some default tags are generated
- by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
- """
-
-
-class Integration(BaseModel):
- type: Literal["wandb"]
- """The type of the integration being enabled for the fine-tuning job"""
-
- wandb: IntegrationWandb
- """The settings for your integration with Weights and Biases.
-
- This payload specifies the project that metrics will be sent to. Optionally, you
- can set an explicit display name for your run, add tags to your run, and set a
- default entity (team, username, etc) to be associated with your run.
- """
-
-
-class FineTuningJob(BaseModel):
- id: str
- """The object identifier, which can be referenced in the API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
-
- error: Optional[Error] = None
- """
- For fine-tuning jobs that have `failed`, this will contain more information on
- the cause of the failure.
- """
-
- fine_tuned_model: Optional[str] = None
- """The name of the fine-tuned model that is being created.
-
- The value will be null if the fine-tuning job is still running.
- """
-
- finished_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the fine-tuning job was finished.
-
- The value will be null if the fine-tuning job is still running.
- """
-
- hyperparameters: Hyperparameters
- """The hyperparameters used for the fine-tuning job.
-
- This value will only be returned when running `supervised` jobs.
- """
-
- model: str
- """The base model that is being fine-tuned."""
-
- object: Literal["fine_tuning.job"]
- """The object type, which is always "fine_tuning.job"."""
-
- organization_id: str
- """The organization that owns the fine-tuning job."""
-
- result_files: List[str]
- """The compiled results file ID(s) for the fine-tuning job.
-
- You can retrieve the results with the
- [Files API](/docs/api-reference/files/retrieve-contents).
- """
-
- seed: int
- """The seed used for the fine-tuning job."""
-
- status: Literal["validating_files", "queued", "running", "succeeded", "failed", "cancelled"]
- """
- The current status of the fine-tuning job, which can be either
- `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
- """
-
- trained_tokens: Optional[int] = None
- """The total number of billable tokens processed by this fine-tuning job.
-
- The value will be null if the fine-tuning job is still running.
- """
-
- training_file: str
- """The file ID used for training.
-
- You can retrieve the training data with the
- [Files API](/docs/api-reference/files/retrieve-contents).
- """
-
- validation_file: Optional[str] = None
- """The file ID used for validation.
-
- You can retrieve the validation results with the
- [Files API](/docs/api-reference/files/retrieve-contents).
- """
-
- estimated_finish: Optional[int] = None
- """
- The Unix timestamp (in seconds) for when the fine-tuning job is estimated to
- finish. The value will be null if the fine-tuning job is not running.
- """
-
- integrations: Optional[List[Integration]] = None
- """A list of integrations to enable for this fine-tuning job."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- method: Optional[FineTuneMethod] = None
- """The method used for fine-tuning."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py
deleted file mode 100644
index a538e659..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .fine_tune_method_param import FineTuneMethodParam
-
-__all__ = ["JobCreateParams", "Hyperparameters", "Integration", "IntegrationWandb"]
-
-
-class JobCreateParams(TypedDict, total=False):
- model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]]
- """The name of the model to fine-tune.
-
- You can select one of the
- [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
- """
-
- training_file: Required[str]
- """The ID of an uploaded file that contains training data.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your dataset must be formatted as a JSONL file. Additionally, you must upload
- your file with the purpose `fine-tune`.
-
- The contents of the file should differ depending on if the model uses the
- [chat](/docs/api-reference/fine-tuning/chat-input),
- [completions](/docs/api-reference/fine-tuning/completions-input) format, or if
- the fine-tuning method uses the
- [preference](/docs/api-reference/fine-tuning/preference-input) format.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
- """
-
- hyperparameters: Hyperparameters
- """
- The hyperparameters used for the fine-tuning job. This value is now deprecated
- in favor of `method`, and should be passed in under the `method` parameter.
- """
-
- integrations: Optional[Iterable[Integration]]
- """A list of integrations to enable for your fine-tuning job."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- method: FineTuneMethodParam
- """The method used for fine-tuning."""
-
- seed: Optional[int]
- """The seed controls the reproducibility of the job.
-
- Passing in the same seed and job parameters should produce the same results, but
- may differ in rare cases. If a seed is not specified, one will be generated for
- you.
- """
-
- suffix: Optional[str]
- """
- A string of up to 64 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
- """
-
- validation_file: Optional[str]
- """The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the fine-tuning
- results file. The same data should not be present in both train and validation
- files.
-
- Your dataset must be formatted as a JSONL file. You must upload your file with
- the purpose `fine-tune`.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
- """
-
-
-class Hyperparameters(TypedDict, total=False):
- batch_size: Union[Literal["auto"], int]
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float]
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class IntegrationWandb(TypedDict, total=False):
- project: Required[str]
- """The name of the project that the new run will be created under."""
-
- entity: Optional[str]
- """The entity to use for the run.
-
- This allows you to set the team or username of the WandB user that you would
- like associated with the run. If not set, the default entity for the registered
- WandB API key is used.
- """
-
- name: Optional[str]
- """A display name to set for the run.
-
- If not set, we will use the Job ID as the name.
- """
-
- tags: List[str]
- """A list of tags to be attached to the newly created run.
-
- These tags are passed through directly to WandB. Some default tags are generated
- by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
- """
-
-
-class Integration(TypedDict, total=False):
- type: Required[Literal["wandb"]]
- """The type of integration to enable.
-
- Currently, only "wandb" (Weights and Biases) is supported.
- """
-
- wandb: Required[IntegrationWandb]
- """The settings for your integration with Weights and Biases.
-
- This payload specifies the project that metrics will be sent to. Optionally, you
- can set an explicit display name for your run, add tags to your run, and set a
- default entity (team, username, etc) to be associated with your run.
- """
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py
deleted file mode 100644
index b79f3ce8..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import TypedDict
-
-__all__ = ["JobListParams"]
-
-
-class JobListParams(TypedDict, total=False):
- after: str
- """Identifier for the last job from the previous pagination request."""
-
- limit: int
- """Number of fine-tuning jobs to retrieve."""
-
- metadata: Optional[Dict[str, str]]
- """Optional metadata filter.
-
- To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to
- indicate no metadata.
- """
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py
deleted file mode 100644
index ea6eb6a8..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .fine_tuning_job import FineTuningJob
-
-__all__ = ["JobListResponse"]
-
-
-class JobListResponse(BaseModel):
- data: List[FineTuningJob]
-
- has_more: bool
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py
index 9ba11022..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py
+++ b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py
@@ -1,8 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
-
-from .event_retrieve_params import EventRetrieveParams as EventRetrieveParams
-from .event_retrieve_response import EventRetrieveResponse as EventRetrieveResponse
-from .checkpoint_retrieve_params import CheckpointRetrieveParams as CheckpointRetrieveParams
-from .checkpoint_retrieve_response import CheckpointRetrieveResponse as CheckpointRetrieveResponse
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py
deleted file mode 100644
index 34666a9f..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["CheckpointRetrieveParams"]
-
-
-class CheckpointRetrieveParams(TypedDict, total=False):
- after: str
- """Identifier for the last checkpoint ID from the previous pagination request."""
-
- limit: int
- """Number of checkpoints to retrieve."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py
deleted file mode 100644
index bf0af44d..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["CheckpointRetrieveResponse", "Data", "DataMetrics"]
-
-
-class DataMetrics(BaseModel):
- full_valid_loss: Optional[float] = None
-
- full_valid_mean_token_accuracy: Optional[float] = None
-
- step: Optional[float] = None
-
- train_loss: Optional[float] = None
-
- train_mean_token_accuracy: Optional[float] = None
-
- valid_loss: Optional[float] = None
-
- valid_mean_token_accuracy: Optional[float] = None
-
-
-class Data(BaseModel):
- id: str
- """The checkpoint identifier, which can be referenced in the API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the checkpoint was created."""
-
- fine_tuned_model_checkpoint: str
- """The name of the fine-tuned checkpoint model that is created."""
-
- fine_tuning_job_id: str
- """The name of the fine-tuning job that this checkpoint was created from."""
-
- metrics: DataMetrics
- """Metrics at the step number during the fine-tuning job."""
-
- object: Literal["fine_tuning.job.checkpoint"]
- """The object type, which is always "fine_tuning.job.checkpoint"."""
-
- step_number: int
- """The step number that the checkpoint was created at."""
-
-
-class CheckpointRetrieveResponse(BaseModel):
- data: List[Data]
-
- has_more: bool
-
- object: Literal["list"]
-
- first_id: Optional[str] = None
-
- last_id: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py
deleted file mode 100644
index f0162e0e..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["EventRetrieveParams"]
-
-
-class EventRetrieveParams(TypedDict, total=False):
- after: str
- """Identifier for the last event from the previous pagination request."""
-
- limit: int
- """Number of events to retrieve."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py
deleted file mode 100644
index 8c22fe30..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import builtins
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["EventRetrieveResponse", "Data"]
-
-
-class Data(BaseModel):
- id: str
- """The object identifier."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
-
- level: Literal["info", "warn", "error"]
- """The log level of the event."""
-
- message: str
- """The message of the event."""
-
- object: Literal["fine_tuning.job.event"]
- """The object type, which is always "fine_tuning.job.event"."""
-
- data: Optional[builtins.object] = None
- """The data associated with the event."""
-
- type: Optional[Literal["message", "metrics"]] = None
- """The type of event."""
-
-
-class EventRetrieveResponse(BaseModel):
- data: List[Data]
-
- has_more: bool
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/function_object.py b/src/digitalocean_genai_sdk/types/function_object.py
deleted file mode 100644
index 4fe27f86..00000000
--- a/src/digitalocean_genai_sdk/types/function_object.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-
-from .._models import BaseModel
-
-__all__ = ["FunctionObject"]
-
-
-class FunctionObject(BaseModel):
- name: str
- """The name of the function to be called.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: Optional[str] = None
- """
- A description of what the function does, used by the model to choose when and
- how to call the function.
- """
-
- parameters: Optional[Dict[str, object]] = None
- """The parameters the functions accepts, described as a JSON Schema object.
-
- See the [guide](/docs/guides/function-calling) for examples, and the
- [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- documentation about the format.
-
- Omitting `parameters` defines a function with an empty parameter list.
- """
-
- strict: Optional[bool] = None
- """Whether to enable strict schema adherence when generating the function call.
-
- If set to true, the model will follow the exact schema defined in the
- `parameters` field. Only a subset of JSON Schema is supported when `strict` is
- `true`. Learn more about Structured Outputs in the
- [function calling guide](docs/guides/function-calling).
- """
diff --git a/src/digitalocean_genai_sdk/types/function_object_param.py b/src/digitalocean_genai_sdk/types/function_object_param.py
deleted file mode 100644
index 1a358408..00000000
--- a/src/digitalocean_genai_sdk/types/function_object_param.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["FunctionObjectParam"]
-
-
-class FunctionObjectParam(TypedDict, total=False):
- name: Required[str]
- """The name of the function to be called.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: str
- """
- A description of what the function does, used by the model to choose when and
- how to call the function.
- """
-
- parameters: Dict[str, object]
- """The parameters the functions accepts, described as a JSON Schema object.
-
- See the [guide](/docs/guides/function-calling) for examples, and the
- [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- documentation about the format.
-
- Omitting `parameters` defines a function with an empty parameter list.
- """
-
- strict: Optional[bool]
- """Whether to enable strict schema adherence when generating the function call.
-
- If set to true, the model will follow the exact schema defined in the
- `parameters` field. Only a subset of JSON Schema is supported when `strict` is
- `true`. Learn more about Structured Outputs in the
- [function calling guide](docs/guides/function-calling).
- """
diff --git a/src/digitalocean_genai_sdk/types/function_tool_call.py b/src/digitalocean_genai_sdk/types/function_tool_call.py
deleted file mode 100644
index ecdb4a02..00000000
--- a/src/digitalocean_genai_sdk/types/function_tool_call.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FunctionToolCall"]
-
-
-class FunctionToolCall(BaseModel):
- arguments: str
- """A JSON string of the arguments to pass to the function."""
-
- call_id: str
- """The unique ID of the function tool call generated by the model."""
-
- name: str
- """The name of the function to run."""
-
- type: Literal["function_call"]
- """The type of the function tool call. Always `function_call`."""
-
- id: Optional[str] = None
- """The unique ID of the function tool call."""
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_output.py b/src/digitalocean_genai_sdk/types/function_tool_call_output.py
deleted file mode 100644
index 4cbe27ce..00000000
--- a/src/digitalocean_genai_sdk/types/function_tool_call_output.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FunctionToolCallOutput"]
-
-
-class FunctionToolCallOutput(BaseModel):
- call_id: str
- """The unique ID of the function tool call generated by the model."""
-
- output: str
- """A JSON string of the output of the function tool call."""
-
- type: Literal["function_call_output"]
- """The type of the function tool call output. Always `function_call_output`."""
-
- id: Optional[str] = None
- """The unique ID of the function tool call output.
-
- Populated when this item is returned via API.
- """
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py b/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py
deleted file mode 100644
index 49a573ed..00000000
--- a/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FunctionToolCallOutputParam"]
-
-
-class FunctionToolCallOutputParam(TypedDict, total=False):
- call_id: Required[str]
- """The unique ID of the function tool call generated by the model."""
-
- output: Required[str]
- """A JSON string of the output of the function tool call."""
-
- type: Required[Literal["function_call_output"]]
- """The type of the function tool call output. Always `function_call_output`."""
-
- id: str
- """The unique ID of the function tool call output.
-
- Populated when this item is returned via API.
- """
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_param.py b/src/digitalocean_genai_sdk/types/function_tool_call_param.py
deleted file mode 100644
index 91e076b6..00000000
--- a/src/digitalocean_genai_sdk/types/function_tool_call_param.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FunctionToolCallParam"]
-
-
-class FunctionToolCallParam(TypedDict, total=False):
- arguments: Required[str]
- """A JSON string of the arguments to pass to the function."""
-
- call_id: Required[str]
- """The unique ID of the function tool call generated by the model."""
-
- name: Required[str]
- """The name of the function to run."""
-
- type: Required[Literal["function_call"]]
- """The type of the function tool call. Always `function_call`."""
-
- id: str
- """The unique ID of the function tool call."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/image_create_edit_params.py b/src/digitalocean_genai_sdk/types/image_create_edit_params.py
deleted file mode 100644
index f84f5642..00000000
--- a/src/digitalocean_genai_sdk/types/image_create_edit_params.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["ImageCreateEditParams"]
-
-
-class ImageCreateEditParams(TypedDict, total=False):
- image: Required[FileTypes]
- """The image to edit.
-
- Must be a valid PNG file, less than 4MB, and square. If mask is not provided,
- image must have transparency, which will be used as the mask.
- """
-
- prompt: Required[str]
- """A text description of the desired image(s).
-
- The maximum length is 1000 characters.
- """
-
- mask: FileTypes
- """An additional image whose fully transparent areas (e.g.
-
- where alpha is zero) indicate where `image` should be edited. Must be a valid
- PNG file, less than 4MB, and have the same dimensions as `image`.
- """
-
- model: Union[str, Literal["dall-e-2"], None]
- """The model to use for image generation.
-
- Only `dall-e-2` is supported at this time.
- """
-
- n: Optional[int]
- """The number of images to generate. Must be between 1 and 10."""
-
- response_format: Optional[Literal["url", "b64_json"]]
- """The format in which the generated images are returned.
-
- Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated.
- """
-
- size: Optional[Literal["256x256", "512x512", "1024x1024"]]
- """The size of the generated images.
-
- Must be one of `256x256`, `512x512`, or `1024x1024`.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/image_create_generation_params.py b/src/digitalocean_genai_sdk/types/image_create_generation_params.py
deleted file mode 100644
index e8cfbb18..00000000
--- a/src/digitalocean_genai_sdk/types/image_create_generation_params.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ImageCreateGenerationParams"]
-
-
-class ImageCreateGenerationParams(TypedDict, total=False):
- prompt: Required[str]
- """A text description of the desired image(s).
-
- The maximum length is 1000 characters for `dall-e-2` and 4000 characters for
- `dall-e-3`.
- """
-
- model: Union[str, Literal["dall-e-2", "dall-e-3"], None]
- """The model to use for image generation."""
-
- n: Optional[int]
- """The number of images to generate.
-
- Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
- """
-
- quality: Literal["standard", "hd"]
- """The quality of the image that will be generated.
-
- `hd` creates images with finer details and greater consistency across the image.
- This param is only supported for `dall-e-3`.
- """
-
- response_format: Optional[Literal["url", "b64_json"]]
- """The format in which the generated images are returned.
-
- Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated.
- """
-
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
- """The size of the generated images.
-
- Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one
- of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
- """
-
- style: Optional[Literal["vivid", "natural"]]
- """The style of the generated images.
-
- Must be one of `vivid` or `natural`. Vivid causes the model to lean towards
- generating hyper-real and dramatic images. Natural causes the model to produce
- more natural, less hyper-real looking images. This param is only supported for
- `dall-e-3`.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/image_create_variation_params.py b/src/digitalocean_genai_sdk/types/image_create_variation_params.py
deleted file mode 100644
index 64245a05..00000000
--- a/src/digitalocean_genai_sdk/types/image_create_variation_params.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["ImageCreateVariationParams"]
-
-
-class ImageCreateVariationParams(TypedDict, total=False):
- image: Required[FileTypes]
- """The image to use as the basis for the variation(s).
-
- Must be a valid PNG file, less than 4MB, and square.
- """
-
- model: Union[str, Literal["dall-e-2"], None]
- """The model to use for image generation.
-
- Only `dall-e-2` is supported at this time.
- """
-
- n: Optional[int]
- """The number of images to generate.
-
- Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
- """
-
- response_format: Optional[Literal["url", "b64_json"]]
- """The format in which the generated images are returned.
-
- Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated.
- """
-
- size: Optional[Literal["256x256", "512x512", "1024x1024"]]
- """The size of the generated images.
-
- Must be one of `256x256`, `512x512`, or `1024x1024`.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/images_response.py b/src/digitalocean_genai_sdk/types/images_response.py
deleted file mode 100644
index 509e0069..00000000
--- a/src/digitalocean_genai_sdk/types/images_response.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from .._models import BaseModel
-
-__all__ = ["ImagesResponse", "Data"]
-
-
-class Data(BaseModel):
- b64_json: Optional[str] = None
- """
- The base64-encoded JSON of the generated image, if `response_format` is
- `b64_json`.
- """
-
- revised_prompt: Optional[str] = None
- """
- The prompt that was used to generate the image, if there was any revision to the
- prompt.
- """
-
- url: Optional[str] = None
- """The URL of the generated image, if `response_format` is `url` (default)."""
-
-
-class ImagesResponse(BaseModel):
- created: int
-
- data: List[Data]
diff --git a/src/digitalocean_genai_sdk/types/includable.py b/src/digitalocean_genai_sdk/types/includable.py
deleted file mode 100644
index 8b4920a2..00000000
--- a/src/digitalocean_genai_sdk/types/includable.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["Includable"]
-
-Includable: TypeAlias = Literal[
- "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url"
-]
diff --git a/src/digitalocean_genai_sdk/types/input_content.py b/src/digitalocean_genai_sdk/types/input_content.py
deleted file mode 100644
index 04e37845..00000000
--- a/src/digitalocean_genai_sdk/types/input_content.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-
-__all__ = ["InputContent", "InputText", "InputImage", "InputFile"]
-
-
-class InputText(BaseModel):
- text: str
- """The text input to the model."""
-
- type: Literal["input_text"]
- """The type of the input item. Always `input_text`."""
-
-
-class InputImage(BaseModel):
- detail: Literal["high", "low", "auto"]
- """The detail level of the image to be sent to the model.
-
- One of `high`, `low`, or `auto`. Defaults to `auto`.
- """
-
- type: Literal["input_image"]
- """The type of the input item. Always `input_image`."""
-
- file_id: Optional[str] = None
- """The ID of the file to be sent to the model."""
-
- image_url: Optional[str] = None
- """The URL of the image to be sent to the model.
-
- A fully qualified URL or base64 encoded image in a data URL.
- """
-
-
-class InputFile(BaseModel):
- type: Literal["input_file"]
- """The type of the input item. Always `input_file`."""
-
- file_data: Optional[str] = None
- """The content of the file to be sent to the model."""
-
- file_id: Optional[str] = None
- """The ID of the file to be sent to the model."""
-
- filename: Optional[str] = None
- """The name of the file to be sent to the model."""
-
-
-InputContent: TypeAlias = Union[InputText, InputImage, InputFile]
diff --git a/src/digitalocean_genai_sdk/types/input_content_param.py b/src/digitalocean_genai_sdk/types/input_content_param.py
deleted file mode 100644
index ed0bdf62..00000000
--- a/src/digitalocean_genai_sdk/types/input_content_param.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = ["InputContentParam", "InputText", "InputImage", "InputFile"]
-
-
-class InputText(TypedDict, total=False):
- text: Required[str]
- """The text input to the model."""
-
- type: Required[Literal["input_text"]]
- """The type of the input item. Always `input_text`."""
-
-
-class InputImage(TypedDict, total=False):
- detail: Required[Literal["high", "low", "auto"]]
- """The detail level of the image to be sent to the model.
-
- One of `high`, `low`, or `auto`. Defaults to `auto`.
- """
-
- type: Required[Literal["input_image"]]
- """The type of the input item. Always `input_image`."""
-
- file_id: Optional[str]
- """The ID of the file to be sent to the model."""
-
- image_url: Optional[str]
- """The URL of the image to be sent to the model.
-
- A fully qualified URL or base64 encoded image in a data URL.
- """
-
-
-class InputFile(TypedDict, total=False):
- type: Required[Literal["input_file"]]
- """The type of the input item. Always `input_file`."""
-
- file_data: str
- """The content of the file to be sent to the model."""
-
- file_id: str
- """The ID of the file to be sent to the model."""
-
- filename: str
- """The name of the file to be sent to the model."""
-
-
-InputContentParam: TypeAlias = Union[InputText, InputImage, InputFile]
diff --git a/src/digitalocean_genai_sdk/types/input_message.py b/src/digitalocean_genai_sdk/types/input_message.py
deleted file mode 100644
index 4dc5526f..00000000
--- a/src/digitalocean_genai_sdk/types/input_message.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .input_content import InputContent
-
-__all__ = ["InputMessage"]
-
-
-class InputMessage(BaseModel):
- content: List[InputContent]
- """
- A list of one or many input items to the model, containing different content
- types.
- """
-
- role: Literal["user", "system", "developer"]
- """The role of the message input. One of `user`, `system`, or `developer`."""
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
-
- type: Optional[Literal["message"]] = None
- """The type of the message input. Always set to `message`."""
diff --git a/src/digitalocean_genai_sdk/types/input_message_param.py b/src/digitalocean_genai_sdk/types/input_message_param.py
deleted file mode 100644
index 388c54ca..00000000
--- a/src/digitalocean_genai_sdk/types/input_message_param.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-from .input_content_param import InputContentParam
-
-__all__ = ["InputMessageParam"]
-
-
-class InputMessageParam(TypedDict, total=False):
- content: Required[Iterable[InputContentParam]]
- """
- A list of one or many input items to the model, containing different content
- types.
- """
-
- role: Required[Literal["user", "system", "developer"]]
- """The role of the message input. One of `user`, `system`, or `developer`."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
-
- type: Literal["message"]
- """The type of the message input. Always set to `message`."""
diff --git a/src/digitalocean_genai_sdk/types/model_delete_response.py b/src/digitalocean_genai_sdk/types/model_delete_response.py
deleted file mode 100644
index 63b2d296..00000000
--- a/src/digitalocean_genai_sdk/types/model_delete_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["ModelDeleteResponse"]
-
-
-class ModelDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/model_response_properties.py b/src/digitalocean_genai_sdk/types/model_response_properties.py
deleted file mode 100644
index 547c6391..00000000
--- a/src/digitalocean_genai_sdk/types/model_response_properties.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-
-from .._models import BaseModel
-
-__all__ = ["ModelResponseProperties"]
-
-
-class ModelResponseProperties(BaseModel):
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- temperature: Optional[float] = None
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. We generally recommend altering
- this or `top_p` but not both.
- """
-
- top_p: Optional[float] = None
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- """
-
- user: Optional[str] = None
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/moderation_classify_params.py b/src/digitalocean_genai_sdk/types/moderation_classify_params.py
deleted file mode 100644
index bcc99a1e..00000000
--- a/src/digitalocean_genai_sdk/types/moderation_classify_params.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = [
- "ModerationClassifyParams",
- "InputUnionMember2",
- "InputUnionMember2UnionMember0",
- "InputUnionMember2UnionMember0ImageURL",
- "InputUnionMember2UnionMember1",
-]
-
-
-class ModerationClassifyParams(TypedDict, total=False):
- input: Required[Union[str, List[str], Iterable[InputUnionMember2]]]
- """Input (or inputs) to classify.
-
- Can be a single string, an array of strings, or an array of multi-modal input
- objects similar to other models.
- """
-
- model: Union[
- str,
- Literal[
- "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable"
- ],
- ]
- """The content moderation model you would like to use.
-
- Learn more in [the moderation guide](/docs/guides/moderation), and learn about
- available models [here](/docs/models#moderation).
- """
-
-
-class InputUnionMember2UnionMember0ImageURL(TypedDict, total=False):
- url: Required[str]
- """Either a URL of the image or the base64 encoded image data."""
-
-
-class InputUnionMember2UnionMember0(TypedDict, total=False):
- image_url: Required[InputUnionMember2UnionMember0ImageURL]
- """Contains either an image URL or a data URL for a base64 encoded image."""
-
- type: Required[Literal["image_url"]]
- """Always `image_url`."""
-
-
-class InputUnionMember2UnionMember1(TypedDict, total=False):
- text: Required[str]
- """A string of text to classify."""
-
- type: Required[Literal["text"]]
- """Always `text`."""
-
-
-InputUnionMember2: TypeAlias = Union[InputUnionMember2UnionMember0, InputUnionMember2UnionMember1]
diff --git a/src/digitalocean_genai_sdk/types/moderation_classify_response.py b/src/digitalocean_genai_sdk/types/moderation_classify_response.py
deleted file mode 100644
index cfda7318..00000000
--- a/src/digitalocean_genai_sdk/types/moderation_classify_response.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-
-__all__ = [
- "ModerationClassifyResponse",
- "Result",
- "ResultCategories",
- "ResultCategoryAppliedInputTypes",
- "ResultCategoryScores",
-]
-
-
-class ResultCategories(BaseModel):
- harassment: bool
- """
- Content that expresses, incites, or promotes harassing language towards any
- target.
- """
-
- harassment_threatening: bool = FieldInfo(alias="harassment/threatening")
- """
- Harassment content that also includes violence or serious harm towards any
- target.
- """
-
- hate: bool
- """
- Content that expresses, incites, or promotes hate based on race, gender,
- ethnicity, religion, nationality, sexual orientation, disability status, or
- caste. Hateful content aimed at non-protected groups (e.g., chess players) is
- harassment.
- """
-
- hate_threatening: bool = FieldInfo(alias="hate/threatening")
- """
- Hateful content that also includes violence or serious harm towards the targeted
- group based on race, gender, ethnicity, religion, nationality, sexual
- orientation, disability status, or caste.
- """
-
- illicit: Optional[bool] = None
- """
- Content that includes instructions or advice that facilitate the planning or
- execution of wrongdoing, or that gives advice or instruction on how to commit
- illicit acts. For example, "how to shoplift" would fit this category.
- """
-
- illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None)
- """
- Content that includes instructions or advice that facilitate the planning or
- execution of wrongdoing that also includes violence, or that gives advice or
- instruction on the procurement of any weapon.
- """
-
- self_harm: bool = FieldInfo(alias="self-harm")
- """
- Content that promotes, encourages, or depicts acts of self-harm, such as
- suicide, cutting, and eating disorders.
- """
-
- self_harm_instructions: bool = FieldInfo(alias="self-harm/instructions")
- """
- Content that encourages performing acts of self-harm, such as suicide, cutting,
- and eating disorders, or that gives instructions or advice on how to commit such
- acts.
- """
-
- self_harm_intent: bool = FieldInfo(alias="self-harm/intent")
- """
- Content where the speaker expresses that they are engaging or intend to engage
- in acts of self-harm, such as suicide, cutting, and eating disorders.
- """
-
- sexual: bool
- """
- Content meant to arouse sexual excitement, such as the description of sexual
- activity, or that promotes sexual services (excluding sex education and
- wellness).
- """
-
- sexual_minors: bool = FieldInfo(alias="sexual/minors")
- """Sexual content that includes an individual who is under 18 years old."""
-
- violence: bool
- """Content that depicts death, violence, or physical injury."""
-
- violence_graphic: bool = FieldInfo(alias="violence/graphic")
- """Content that depicts death, violence, or physical injury in graphic detail."""
-
-
-class ResultCategoryAppliedInputTypes(BaseModel):
- harassment: List[Literal["text"]]
- """The applied input type(s) for the category 'harassment'."""
-
- harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening")
- """The applied input type(s) for the category 'harassment/threatening'."""
-
- hate: List[Literal["text"]]
- """The applied input type(s) for the category 'hate'."""
-
- hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening")
- """The applied input type(s) for the category 'hate/threatening'."""
-
- illicit: List[Literal["text"]]
- """The applied input type(s) for the category 'illicit'."""
-
- illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent")
- """The applied input type(s) for the category 'illicit/violent'."""
-
- self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm")
- """The applied input type(s) for the category 'self-harm'."""
-
- self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions")
- """The applied input type(s) for the category 'self-harm/instructions'."""
-
- self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent")
- """The applied input type(s) for the category 'self-harm/intent'."""
-
- sexual: List[Literal["text", "image"]]
- """The applied input type(s) for the category 'sexual'."""
-
- sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors")
- """The applied input type(s) for the category 'sexual/minors'."""
-
- violence: List[Literal["text", "image"]]
- """The applied input type(s) for the category 'violence'."""
-
- violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic")
- """The applied input type(s) for the category 'violence/graphic'."""
-
-
-class ResultCategoryScores(BaseModel):
- harassment: float
- """The score for the category 'harassment'."""
-
- harassment_threatening: float = FieldInfo(alias="harassment/threatening")
- """The score for the category 'harassment/threatening'."""
-
- hate: float
- """The score for the category 'hate'."""
-
- hate_threatening: float = FieldInfo(alias="hate/threatening")
- """The score for the category 'hate/threatening'."""
-
- illicit: float
- """The score for the category 'illicit'."""
-
- illicit_violent: float = FieldInfo(alias="illicit/violent")
- """The score for the category 'illicit/violent'."""
-
- self_harm: float = FieldInfo(alias="self-harm")
- """The score for the category 'self-harm'."""
-
- self_harm_instructions: float = FieldInfo(alias="self-harm/instructions")
- """The score for the category 'self-harm/instructions'."""
-
- self_harm_intent: float = FieldInfo(alias="self-harm/intent")
- """The score for the category 'self-harm/intent'."""
-
- sexual: float
- """The score for the category 'sexual'."""
-
- sexual_minors: float = FieldInfo(alias="sexual/minors")
- """The score for the category 'sexual/minors'."""
-
- violence: float
- """The score for the category 'violence'."""
-
- violence_graphic: float = FieldInfo(alias="violence/graphic")
- """The score for the category 'violence/graphic'."""
-
-
-class Result(BaseModel):
- categories: ResultCategories
- """A list of the categories, and whether they are flagged or not."""
-
- category_applied_input_types: ResultCategoryAppliedInputTypes
- """
- A list of the categories along with the input type(s) that the score applies to.
- """
-
- category_scores: ResultCategoryScores
- """A list of the categories along with their scores as predicted by model."""
-
- flagged: bool
- """Whether any of the below categories are flagged."""
-
-
-class ModerationClassifyResponse(BaseModel):
- id: str
- """The unique identifier for the moderation request."""
-
- model: str
- """The model used to generate the moderation results."""
-
- results: List[Result]
- """A list of moderation objects."""
diff --git a/src/digitalocean_genai_sdk/types/openai_file.py b/src/digitalocean_genai_sdk/types/openai_file.py
deleted file mode 100644
index a8398a35..00000000
--- a/src/digitalocean_genai_sdk/types/openai_file.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["OpenAIFile"]
-
-
-class OpenAIFile(BaseModel):
- id: str
- """The file identifier, which can be referenced in the API endpoints."""
-
- bytes: int
- """The size of the file, in bytes."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the file was created."""
-
- filename: str
- """The name of the file."""
-
- object: Literal["file"]
- """The object type, which is always `file`."""
-
- purpose: Literal[
- "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision"
- ]
- """The intended purpose of the file.
-
- Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`,
- `fine-tune`, `fine-tune-results` and `vision`.
- """
-
- status: Literal["uploaded", "processed", "error"]
- """Deprecated.
-
- The current status of the file, which can be either `uploaded`, `processed`, or
- `error`.
- """
-
- expires_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the file will expire."""
-
- status_details: Optional[str] = None
- """Deprecated.
-
- For details on why a fine-tuning training file failed validation, see the
- `error` field on `fine_tuning.job`.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/__init__.py b/src/digitalocean_genai_sdk/types/organization/__init__.py
index 5b34f495..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/organization/__init__.py
+++ b/src/digitalocean_genai_sdk/types/organization/__init__.py
@@ -1,34 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
-
-from .invite import Invite as Invite
-from .project import Project as Project
-from .admin_api_key import AdminAPIKey as AdminAPIKey
-from .user_list_params import UserListParams as UserListParams
-from .organization_user import OrganizationUser as OrganizationUser
-from .invite_list_params import InviteListParams as InviteListParams
-from .user_list_response import UserListResponse as UserListResponse
-from .user_update_params import UserUpdateParams as UserUpdateParams
-from .project_list_params import ProjectListParams as ProjectListParams
-from .usage_images_params import UsageImagesParams as UsageImagesParams
-from .invite_create_params import InviteCreateParams as InviteCreateParams
-from .invite_list_response import InviteListResponse as InviteListResponse
-from .user_delete_response import UserDeleteResponse as UserDeleteResponse
-from .project_create_params import ProjectCreateParams as ProjectCreateParams
-from .project_list_response import ProjectListResponse as ProjectListResponse
-from .project_update_params import ProjectUpdateParams as ProjectUpdateParams
-from .invite_delete_response import InviteDeleteResponse as InviteDeleteResponse
-from .usage_embeddings_params import UsageEmbeddingsParams as UsageEmbeddingsParams
-from .usage_completions_params import UsageCompletionsParams as UsageCompletionsParams
-from .usage_moderations_params import UsageModerationsParams as UsageModerationsParams
-from .admin_api_key_list_params import AdminAPIKeyListParams as AdminAPIKeyListParams
-from .usage_vector_stores_params import UsageVectorStoresParams as UsageVectorStoresParams
-from .admin_api_key_create_params import AdminAPIKeyCreateParams as AdminAPIKeyCreateParams
-from .admin_api_key_list_response import AdminAPIKeyListResponse as AdminAPIKeyListResponse
-from .usage_audio_speeches_params import UsageAudioSpeechesParams as UsageAudioSpeechesParams
-from .admin_api_key_delete_response import AdminAPIKeyDeleteResponse as AdminAPIKeyDeleteResponse
-from .usage_audio_transcriptions_params import UsageAudioTranscriptionsParams as UsageAudioTranscriptionsParams
-from .usage_code_interpreter_sessions_params import (
- UsageCodeInterpreterSessionsParams as UsageCodeInterpreterSessionsParams,
-)
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key.py
deleted file mode 100644
index 8a57458f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ..._models import BaseModel
-
-__all__ = ["AdminAPIKey", "Owner"]
-
-
-class Owner(BaseModel):
- id: Optional[str] = None
-
- created_at: Optional[int] = None
-
- name: Optional[str] = None
-
- role: Optional[str] = None
-
- type: Optional[str] = None
-
-
-class AdminAPIKey(BaseModel):
- id: Optional[str] = None
-
- created_at: Optional[int] = None
-
- name: Optional[str] = None
-
- object: Optional[str] = None
-
- owner: Optional[Owner] = None
-
- redacted_value: Optional[str] = None
-
- value: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py
deleted file mode 100644
index dccdfb8a..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["AdminAPIKeyCreateParams"]
-
-
-class AdminAPIKeyCreateParams(TypedDict, total=False):
- name: Required[str]
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py
deleted file mode 100644
index b752558c..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ..._models import BaseModel
-
-__all__ = ["AdminAPIKeyDeleteResponse"]
-
-
-class AdminAPIKeyDeleteResponse(BaseModel):
- id: Optional[str] = None
-
- deleted: Optional[bool] = None
-
- object: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py
deleted file mode 100644
index c3b3f510..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["AdminAPIKeyListParams"]
-
-
-class AdminAPIKeyListParams(TypedDict, total=False):
- after: Optional[str]
- """Return keys with IDs that come after this ID in the pagination order."""
-
- limit: int
- """Maximum number of keys to return."""
-
- order: Literal["asc", "desc"]
- """Order results by creation time, ascending or descending."""
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py
deleted file mode 100644
index 8ef9beb7..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ..._models import BaseModel
-from .admin_api_key import AdminAPIKey
-
-__all__ = ["AdminAPIKeyListResponse"]
-
-
-class AdminAPIKeyListResponse(BaseModel):
- data: Optional[List[AdminAPIKey]] = None
-
- first_id: Optional[str] = None
-
- has_more: Optional[bool] = None
-
- last_id: Optional[str] = None
-
- object: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/organization/invite.py b/src/digitalocean_genai_sdk/types/organization/invite.py
deleted file mode 100644
index fd495caf..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["Invite", "Project"]
-
-
-class Project(BaseModel):
- id: Optional[str] = None
- """Project's public ID"""
-
- role: Optional[Literal["member", "owner"]] = None
- """Project membership role"""
-
-
-class Invite(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- email: str
- """The email address of the individual to whom the invite was sent"""
-
- expires_at: int
- """The Unix timestamp (in seconds) of when the invite expires."""
-
- invited_at: int
- """The Unix timestamp (in seconds) of when the invite was sent."""
-
- object: Literal["organization.invite"]
- """The object type, which is always `organization.invite`"""
-
- role: Literal["owner", "reader"]
- """`owner` or `reader`"""
-
- status: Literal["accepted", "expired", "pending"]
- """`accepted`,`expired`, or `pending`"""
-
- accepted_at: Optional[int] = None
- """The Unix timestamp (in seconds) of when the invite was accepted."""
-
- projects: Optional[List[Project]] = None
- """The projects that were granted membership upon acceptance of the invite."""
diff --git a/src/digitalocean_genai_sdk/types/organization/invite_create_params.py b/src/digitalocean_genai_sdk/types/organization/invite_create_params.py
deleted file mode 100644
index 7709003f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite_create_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["InviteCreateParams", "Project"]
-
-
-class InviteCreateParams(TypedDict, total=False):
- email: Required[str]
- """Send an email to this address"""
-
- role: Required[Literal["reader", "owner"]]
- """`owner` or `reader`"""
-
- projects: Iterable[Project]
- """
- An array of projects to which membership is granted at the same time the org
- invite is accepted. If omitted, the user will be invited to the default project
- for compatibility with legacy behavior.
- """
-
-
-class Project(TypedDict, total=False):
- id: Required[str]
- """Project's public ID"""
-
- role: Required[Literal["member", "owner"]]
- """Project membership role"""
diff --git a/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py b/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py
deleted file mode 100644
index 52bd47b9..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["InviteDeleteResponse"]
-
-
-class InviteDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.invite.deleted"]
- """The object type, which is always `organization.invite.deleted`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/invite_list_params.py b/src/digitalocean_genai_sdk/types/organization/invite_list_params.py
deleted file mode 100644
index 678510d6..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["InviteListParams"]
-
-
-class InviteListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/invite_list_response.py b/src/digitalocean_genai_sdk/types/organization/invite_list_response.py
deleted file mode 100644
index 2b646289..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite_list_response.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .invite import Invite
-from ..._models import BaseModel
-
-__all__ = ["InviteListResponse"]
-
-
-class InviteListResponse(BaseModel):
- data: List[Invite]
-
- object: Literal["list"]
- """The object type, which is always `list`"""
-
- first_id: Optional[str] = None
- """The first `invite_id` in the retrieved `list`"""
-
- has_more: Optional[bool] = None
- """
- The `has_more` property is used for pagination to indicate there are additional
- results.
- """
-
- last_id: Optional[str] = None
- """The last `invite_id` in the retrieved `list`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/organization_user.py b/src/digitalocean_genai_sdk/types/organization/organization_user.py
deleted file mode 100644
index 890833f1..00000000
--- a/src/digitalocean_genai_sdk/types/organization/organization_user.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["OrganizationUser"]
-
-
-class OrganizationUser(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- added_at: int
- """The Unix timestamp (in seconds) of when the user was added."""
-
- email: str
- """The email address of the user"""
-
- name: str
- """The name of the user"""
-
- object: Literal["organization.user"]
- """The object type, which is always `organization.user`"""
-
- role: Literal["owner", "reader"]
- """`owner` or `reader`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/project.py b/src/digitalocean_genai_sdk/types/organization/project.py
deleted file mode 100644
index 731e8609..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["Project"]
-
-
-class Project(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- created_at: int
- """The Unix timestamp (in seconds) of when the project was created."""
-
- name: str
- """The name of the project. This appears in reporting."""
-
- object: Literal["organization.project"]
- """The object type, which is always `organization.project`"""
-
- status: Literal["active", "archived"]
- """`active` or `archived`"""
-
- archived_at: Optional[int] = None
- """The Unix timestamp (in seconds) of when the project was archived or `null`."""
diff --git a/src/digitalocean_genai_sdk/types/organization/project_create_params.py b/src/digitalocean_genai_sdk/types/organization/project_create_params.py
deleted file mode 100644
index 0c18bc5b..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project_create_params.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ProjectCreateParams"]
-
-
-class ProjectCreateParams(TypedDict, total=False):
- name: Required[str]
- """The friendly name of the project, this name appears in reports."""
diff --git a/src/digitalocean_genai_sdk/types/organization/project_list_params.py b/src/digitalocean_genai_sdk/types/organization/project_list_params.py
deleted file mode 100644
index f55fb8a3..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project_list_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["ProjectListParams"]
-
-
-class ProjectListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- include_archived: bool
- """If `true` returns all projects including those that have been `archived`.
-
- Archived projects are not included by default.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/project_list_response.py b/src/digitalocean_genai_sdk/types/organization/project_list_response.py
deleted file mode 100644
index 24a79f63..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from .project import Project
-from ..._models import BaseModel
-
-__all__ = ["ProjectListResponse"]
-
-
-class ProjectListResponse(BaseModel):
- data: List[Project]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/project_update_params.py b/src/digitalocean_genai_sdk/types/organization/project_update_params.py
deleted file mode 100644
index 0ba1984a..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project_update_params.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ProjectUpdateParams"]
-
-
-class ProjectUpdateParams(TypedDict, total=False):
- name: Required[str]
- """The updated name of the project, this name appears in reports."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py b/src/digitalocean_genai_sdk/types/organization/projects/__init__.py
index 4b0e0f9b..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py
+++ b/src/digitalocean_genai_sdk/types/organization/projects/__init__.py
@@ -1,24 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
-
-from .api_key import APIKey as APIKey
-from .rate_limit import RateLimit as RateLimit
-from .project_user import ProjectUser as ProjectUser
-from .service_account import ServiceAccount as ServiceAccount
-from .user_add_params import UserAddParams as UserAddParams
-from .user_list_params import UserListParams as UserListParams
-from .user_list_response import UserListResponse as UserListResponse
-from .user_update_params import UserUpdateParams as UserUpdateParams
-from .api_key_list_params import APIKeyListParams as APIKeyListParams
-from .user_delete_response import UserDeleteResponse as UserDeleteResponse
-from .api_key_list_response import APIKeyListResponse as APIKeyListResponse
-from .rate_limit_list_params import RateLimitListParams as RateLimitListParams
-from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse
-from .rate_limit_list_response import RateLimitListResponse as RateLimitListResponse
-from .rate_limit_update_params import RateLimitUpdateParams as RateLimitUpdateParams
-from .service_account_list_params import ServiceAccountListParams as ServiceAccountListParams
-from .service_account_create_params import ServiceAccountCreateParams as ServiceAccountCreateParams
-from .service_account_list_response import ServiceAccountListResponse as ServiceAccountListResponse
-from .service_account_create_response import ServiceAccountCreateResponse as ServiceAccountCreateResponse
-from .service_account_delete_response import ServiceAccountDeleteResponse as ServiceAccountDeleteResponse
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key.py
deleted file mode 100644
index 276f6d9b..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/api_key.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-from .project_user import ProjectUser
-from .service_account import ServiceAccount
-
-__all__ = ["APIKey", "Owner"]
-
-
-class Owner(BaseModel):
- service_account: Optional[ServiceAccount] = None
- """Represents an individual service account in a project."""
-
- type: Optional[Literal["user", "service_account"]] = None
- """`user` or `service_account`"""
-
- user: Optional[ProjectUser] = None
- """Represents an individual user in a project."""
-
-
-class APIKey(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- created_at: int
- """The Unix timestamp (in seconds) of when the API key was created"""
-
- name: str
- """The name of the API key"""
-
- object: Literal["organization.project.api_key"]
- """The object type, which is always `organization.project.api_key`"""
-
- owner: Owner
-
- redacted_value: str
- """The redacted value of the API key"""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py
deleted file mode 100644
index c3ec64bd..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["APIKeyDeleteResponse"]
-
-
-class APIKeyDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.project.api_key.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py
deleted file mode 100644
index 422a2851..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["APIKeyListParams"]
-
-
-class APIKeyListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py
deleted file mode 100644
index 669de6c6..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from .api_key import APIKey
-from ...._models import BaseModel
-
-__all__ = ["APIKeyListResponse"]
-
-
-class APIKeyListResponse(BaseModel):
- data: List[APIKey]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/project_user.py b/src/digitalocean_genai_sdk/types/organization/projects/project_user.py
deleted file mode 100644
index afcdb514..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/project_user.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ProjectUser"]
-
-
-class ProjectUser(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- added_at: int
- """The Unix timestamp (in seconds) of when the project was added."""
-
- email: str
- """The email address of the user"""
-
- name: str
- """The name of the user"""
-
- object: Literal["organization.project.user"]
- """The object type, which is always `organization.project.user`"""
-
- role: Literal["owner", "member"]
- """`owner` or `member`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py
deleted file mode 100644
index 1a9795f5..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["RateLimit"]
-
-
-class RateLimit(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- max_requests_per_1_minute: int
- """The maximum requests per minute."""
-
- max_tokens_per_1_minute: int
- """The maximum tokens per minute."""
-
- model: str
- """The model this rate limit applies to."""
-
- object: Literal["project.rate_limit"]
- """The object type, which is always `project.rate_limit`"""
-
- batch_1_day_max_input_tokens: Optional[int] = None
- """The maximum batch input tokens per day. Only present for relevant models."""
-
- max_audio_megabytes_per_1_minute: Optional[int] = None
- """The maximum audio megabytes per minute. Only present for relevant models."""
-
- max_images_per_1_minute: Optional[int] = None
- """The maximum images per minute. Only present for relevant models."""
-
- max_requests_per_1_day: Optional[int] = None
- """The maximum requests per day. Only present for relevant models."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py
deleted file mode 100644
index aa007e5f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["RateLimitListParams"]
-
-
-class RateLimitListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, beginning with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned. The default is 100."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py
deleted file mode 100644
index f2133f3e..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-from .rate_limit import RateLimit
-
-__all__ = ["RateLimitListResponse"]
-
-
-class RateLimitListResponse(BaseModel):
- data: List[RateLimit]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py
deleted file mode 100644
index a303d6f4..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["RateLimitUpdateParams"]
-
-
-class RateLimitUpdateParams(TypedDict, total=False):
- project_id: Required[str]
-
- batch_1_day_max_input_tokens: int
- """The maximum batch input tokens per day. Only relevant for certain models."""
-
- max_audio_megabytes_per_1_minute: int
- """The maximum audio megabytes per minute. Only relevant for certain models."""
-
- max_images_per_1_minute: int
- """The maximum images per minute. Only relevant for certain models."""
-
- max_requests_per_1_day: int
- """The maximum requests per day. Only relevant for certain models."""
-
- max_requests_per_1_minute: int
- """The maximum requests per minute."""
-
- max_tokens_per_1_minute: int
- """The maximum tokens per minute."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account.py
deleted file mode 100644
index 9200ba11..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ServiceAccount"]
-
-
-class ServiceAccount(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- created_at: int
- """The Unix timestamp (in seconds) of when the service account was created"""
-
- name: str
- """The name of the service account"""
-
- object: Literal["organization.project.service_account"]
- """The object type, which is always `organization.project.service_account`"""
-
- role: Literal["owner", "member"]
- """`owner` or `member`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py
deleted file mode 100644
index 409dcba5..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ServiceAccountCreateParams"]
-
-
-class ServiceAccountCreateParams(TypedDict, total=False):
- name: Required[str]
- """The name of the service account being created."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py
deleted file mode 100644
index e7757a8a..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ServiceAccountCreateResponse", "APIKey"]
-
-
-class APIKey(BaseModel):
- id: str
-
- created_at: int
-
- name: str
-
- object: Literal["organization.project.service_account.api_key"]
- """The object type, which is always `organization.project.service_account.api_key`"""
-
- value: str
-
-
-class ServiceAccountCreateResponse(BaseModel):
- id: str
-
- api_key: APIKey
-
- created_at: int
-
- name: str
-
- object: Literal["organization.project.service_account"]
-
- role: Literal["member"]
- """Service accounts can only have one role of type `member`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py
deleted file mode 100644
index 28d04e10..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ServiceAccountDeleteResponse"]
-
-
-class ServiceAccountDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.project.service_account.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py
deleted file mode 100644
index 7f808e28..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["ServiceAccountListParams"]
-
-
-class ServiceAccountListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py
deleted file mode 100644
index 0818c8c8..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-from .service_account import ServiceAccount
-
-__all__ = ["ServiceAccountListResponse"]
-
-
-class ServiceAccountListResponse(BaseModel):
- data: List[ServiceAccount]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py
deleted file mode 100644
index 85f38c0c..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UserAddParams"]
-
-
-class UserAddParams(TypedDict, total=False):
- role: Required[Literal["owner", "member"]]
- """`owner` or `member`"""
-
- user_id: Required[str]
- """The ID of the user."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py
deleted file mode 100644
index 7ac68cc5..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["UserDeleteResponse"]
-
-
-class UserDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.project.user.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py
deleted file mode 100644
index d561e907..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["UserListParams"]
-
-
-class UserListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py
deleted file mode 100644
index 1f8993ad..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ...._models import BaseModel
-from .project_user import ProjectUser
-
-__all__ = ["UserListResponse"]
-
-
-class UserListResponse(BaseModel):
- data: List[ProjectUser]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py
deleted file mode 100644
index 08b3e1a4..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UserUpdateParams"]
-
-
-class UserUpdateParams(TypedDict, total=False):
- project_id: Required[str]
-
- role: Required[Literal["owner", "member"]]
- """`owner` or `member`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py b/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py
deleted file mode 100644
index 819ffc37..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageAudioSpeechesParams"]
-
-
-class UsageAudioSpeechesParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
- combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py
deleted file mode 100644
index 318f85a3..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageAudioTranscriptionsParams"]
-
-
-class UsageAudioTranscriptionsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
- combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py
deleted file mode 100644
index 24322abe..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageCodeInterpreterSessionsParams"]
-
-
-class UsageCodeInterpreterSessionsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py
deleted file mode 100644
index 8bd94d39..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageCompletionsParams"]
-
-
-class UsageCompletionsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- batch: bool
- """If `true`, return batch jobs only.
-
- If `false`, return non-batch jobs only. By default, return both.
- """
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `batch`
- or any combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py b/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py
deleted file mode 100644
index c4a71264..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageEmbeddingsParams"]
-
-
-class UsageEmbeddingsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
- combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_images_params.py b/src/digitalocean_genai_sdk/types/organization/usage_images_params.py
deleted file mode 100644
index 31f2a31f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_images_params.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageImagesParams"]
-
-
-class UsageImagesParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `size`,
- `source` or any combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]]
- """Return only usages for these image sizes.
-
- Possible values are `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792`
- or any combination of them.
- """
-
- sources: List[Literal["image.generation", "image.edit", "image.variation"]]
- """Return only usages for these sources.
-
- Possible values are `image.generation`, `image.edit`, `image.variation` or any
- combination of them.
- """
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py b/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py
deleted file mode 100644
index 438fca8f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageModerationsParams"]
-
-
-class UsageModerationsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
- combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py b/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py
deleted file mode 100644
index dc25f126..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageVectorStoresParams"]
-
-
-class UsageVectorStoresParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
diff --git a/src/digitalocean_genai_sdk/types/organization/user_delete_response.py b/src/digitalocean_genai_sdk/types/organization/user_delete_response.py
deleted file mode 100644
index 5baab3bf..00000000
--- a/src/digitalocean_genai_sdk/types/organization/user_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["UserDeleteResponse"]
-
-
-class UserDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.user.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/organization/user_list_params.py b/src/digitalocean_genai_sdk/types/organization/user_list_params.py
deleted file mode 100644
index c7ad6c74..00000000
--- a/src/digitalocean_genai_sdk/types/organization/user_list_params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import TypedDict
-
-__all__ = ["UserListParams"]
-
-
-class UserListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- emails: List[str]
- """Filter by the email address of users."""
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/user_list_response.py b/src/digitalocean_genai_sdk/types/organization/user_list_response.py
deleted file mode 100644
index 73aaf45b..00000000
--- a/src/digitalocean_genai_sdk/types/organization/user_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .organization_user import OrganizationUser
-
-__all__ = ["UserListResponse"]
-
-
-class UserListResponse(BaseModel):
- data: List[OrganizationUser]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/user_update_params.py b/src/digitalocean_genai_sdk/types/organization/user_update_params.py
deleted file mode 100644
index bc276120..00000000
--- a/src/digitalocean_genai_sdk/types/organization/user_update_params.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UserUpdateParams"]
-
-
-class UserUpdateParams(TypedDict, total=False):
- role: Required[Literal["owner", "reader"]]
- """`owner` or `reader`"""
diff --git a/src/digitalocean_genai_sdk/types/organization_get_costs_params.py b/src/digitalocean_genai_sdk/types/organization_get_costs_params.py
deleted file mode 100644
index e114aa0f..00000000
--- a/src/digitalocean_genai_sdk/types/organization_get_costs_params.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["OrganizationGetCostsParams"]
-
-
-class OrganizationGetCostsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- bucket_width: Literal["1d"]
- """Width of each time bucket in response.
-
- Currently only `1d` is supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "line_item"]]
- """Group the costs by the specified fields.
-
- Support fields include `project_id`, `line_item` and any combination of them.
- """
-
- limit: int
- """A limit on the number of buckets to be returned.
-
- Limit can range between 1 and 180, and the default is 7.
- """
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only costs for these projects."""
diff --git a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py
deleted file mode 100644
index 36b79e57..00000000
--- a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import TypedDict
-
-from .audit_log_event_type import AuditLogEventType
-
-__all__ = ["OrganizationListAuditLogsParams", "EffectiveAt"]
-
-
-class OrganizationListAuditLogsParams(TypedDict, total=False):
- actor_emails: List[str]
- """Return only events performed by users with these emails."""
-
- actor_ids: List[str]
- """Return only events performed by these actors.
-
- Can be a user ID, a service account ID, or an api key tracking ID.
- """
-
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- effective_at: EffectiveAt
- """Return only events whose `effective_at` (Unix seconds) is in this range."""
-
- event_types: List[AuditLogEventType]
- """Return only events with a `type` in one of these values.
-
- For example, `project.created`. For all options, see the documentation for the
- [audit log object](/docs/api-reference/audit-logs/object).
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- project_ids: List[str]
- """Return only events for these projects."""
-
- resource_ids: List[str]
- """Return only events performed on these targets.
-
- For example, a project ID updated.
- """
-
-
-class EffectiveAt(TypedDict, total=False):
- gt: int
- """
- Return only events whose `effective_at` (Unix seconds) is greater than this
- value.
- """
-
- gte: int
- """
- Return only events whose `effective_at` (Unix seconds) is greater than or equal
- to this value.
- """
-
- lt: int
- """Return only events whose `effective_at` (Unix seconds) is less than this value."""
-
- lte: int
- """
- Return only events whose `effective_at` (Unix seconds) is less than or equal to
- this value.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py
deleted file mode 100644
index 751ec527..00000000
--- a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-from .audit_log_actor_user import AuditLogActorUser
-from .audit_log_event_type import AuditLogEventType
-
-__all__ = [
- "OrganizationListAuditLogsResponse",
- "Data",
- "DataActor",
- "DataActorAPIKey",
- "DataActorAPIKeyServiceAccount",
- "DataActorSession",
- "DataAPIKeyCreated",
- "DataAPIKeyCreatedData",
- "DataAPIKeyDeleted",
- "DataAPIKeyUpdated",
- "DataAPIKeyUpdatedChangesRequested",
- "DataInviteAccepted",
- "DataInviteDeleted",
- "DataInviteSent",
- "DataInviteSentData",
- "DataLoginFailed",
- "DataLogoutFailed",
- "DataOrganizationUpdated",
- "DataOrganizationUpdatedChangesRequested",
- "DataOrganizationUpdatedChangesRequestedSettings",
- "DataProject",
- "DataProjectArchived",
- "DataProjectCreated",
- "DataProjectCreatedData",
- "DataProjectUpdated",
- "DataProjectUpdatedChangesRequested",
- "DataRateLimitDeleted",
- "DataRateLimitUpdated",
- "DataRateLimitUpdatedChangesRequested",
- "DataServiceAccountCreated",
- "DataServiceAccountCreatedData",
- "DataServiceAccountDeleted",
- "DataServiceAccountUpdated",
- "DataServiceAccountUpdatedChangesRequested",
- "DataUserAdded",
- "DataUserAddedData",
- "DataUserDeleted",
- "DataUserUpdated",
- "DataUserUpdatedChangesRequested",
-]
-
-
-class DataActorAPIKeyServiceAccount(BaseModel):
- id: Optional[str] = None
- """The service account id."""
-
-
-class DataActorAPIKey(BaseModel):
- id: Optional[str] = None
- """The tracking id of the API key."""
-
- service_account: Optional[DataActorAPIKeyServiceAccount] = None
- """The service account that performed the audit logged action."""
-
- type: Optional[Literal["user", "service_account"]] = None
- """The type of API key. Can be either `user` or `service_account`."""
-
- user: Optional[AuditLogActorUser] = None
- """The user who performed the audit logged action."""
-
-
-class DataActorSession(BaseModel):
- ip_address: Optional[str] = None
- """The IP address from which the action was performed."""
-
- user: Optional[AuditLogActorUser] = None
- """The user who performed the audit logged action."""
-
-
-class DataActor(BaseModel):
- api_key: Optional[DataActorAPIKey] = None
- """The API Key used to perform the audit logged action."""
-
- session: Optional[DataActorSession] = None
- """The session in which the audit logged action was performed."""
-
- type: Optional[Literal["session", "api_key"]] = None
- """The type of actor. Is either `session` or `api_key`."""
-
-
-class DataAPIKeyCreatedData(BaseModel):
- scopes: Optional[List[str]] = None
- """A list of scopes allowed for the API key, e.g. `["api.model.request"]`"""
-
-
-class DataAPIKeyCreated(BaseModel):
- id: Optional[str] = None
- """The tracking ID of the API key."""
-
- data: Optional[DataAPIKeyCreatedData] = None
- """The payload used to create the API key."""
-
-
-class DataAPIKeyDeleted(BaseModel):
- id: Optional[str] = None
- """The tracking ID of the API key."""
-
-
-class DataAPIKeyUpdatedChangesRequested(BaseModel):
- scopes: Optional[List[str]] = None
- """A list of scopes allowed for the API key, e.g. `["api.model.request"]`"""
-
-
-class DataAPIKeyUpdated(BaseModel):
- id: Optional[str] = None
- """The tracking ID of the API key."""
-
- changes_requested: Optional[DataAPIKeyUpdatedChangesRequested] = None
- """The payload used to update the API key."""
-
-
-class DataInviteAccepted(BaseModel):
- id: Optional[str] = None
- """The ID of the invite."""
-
-
-class DataInviteDeleted(BaseModel):
- id: Optional[str] = None
- """The ID of the invite."""
-
-
-class DataInviteSentData(BaseModel):
- email: Optional[str] = None
- """The email invited to the organization."""
-
- role: Optional[str] = None
- """The role the email was invited to be. Is either `owner` or `member`."""
-
-
-class DataInviteSent(BaseModel):
- id: Optional[str] = None
- """The ID of the invite."""
-
- data: Optional[DataInviteSentData] = None
- """The payload used to create the invite."""
-
-
-class DataLoginFailed(BaseModel):
- error_code: Optional[str] = None
- """The error code of the failure."""
-
- error_message: Optional[str] = None
- """The error message of the failure."""
-
-
-class DataLogoutFailed(BaseModel):
- error_code: Optional[str] = None
- """The error code of the failure."""
-
- error_message: Optional[str] = None
- """The error message of the failure."""
-
-
-class DataOrganizationUpdatedChangesRequestedSettings(BaseModel):
- threads_ui_visibility: Optional[str] = None
- """
- Visibility of the threads page which shows messages created with the Assistants
- API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.
- """
-
- usage_dashboard_visibility: Optional[str] = None
- """
- Visibility of the usage dashboard which shows activity and costs for your
- organization. One of `ANY_ROLE` or `OWNERS`.
- """
-
-
-class DataOrganizationUpdatedChangesRequested(BaseModel):
- description: Optional[str] = None
- """The organization description."""
-
- name: Optional[str] = None
- """The organization name."""
-
- settings: Optional[DataOrganizationUpdatedChangesRequestedSettings] = None
-
- title: Optional[str] = None
- """The organization title."""
-
-
-class DataOrganizationUpdated(BaseModel):
- id: Optional[str] = None
- """The organization ID."""
-
- changes_requested: Optional[DataOrganizationUpdatedChangesRequested] = None
- """The payload used to update the organization settings."""
-
-
-class DataProject(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
- name: Optional[str] = None
- """The project title."""
-
-
-class DataProjectArchived(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
-
-class DataProjectCreatedData(BaseModel):
- name: Optional[str] = None
- """The project name."""
-
- title: Optional[str] = None
- """The title of the project as seen on the dashboard."""
-
-
-class DataProjectCreated(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
- data: Optional[DataProjectCreatedData] = None
- """The payload used to create the project."""
-
-
-class DataProjectUpdatedChangesRequested(BaseModel):
- title: Optional[str] = None
- """The title of the project as seen on the dashboard."""
-
-
-class DataProjectUpdated(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
- changes_requested: Optional[DataProjectUpdatedChangesRequested] = None
- """The payload used to update the project."""
-
-
-class DataRateLimitDeleted(BaseModel):
- id: Optional[str] = None
- """The rate limit ID"""
-
-
-class DataRateLimitUpdatedChangesRequested(BaseModel):
- batch_1_day_max_input_tokens: Optional[int] = None
- """The maximum batch input tokens per day. Only relevant for certain models."""
-
- max_audio_megabytes_per_1_minute: Optional[int] = None
- """The maximum audio megabytes per minute. Only relevant for certain models."""
-
- max_images_per_1_minute: Optional[int] = None
- """The maximum images per minute. Only relevant for certain models."""
-
- max_requests_per_1_day: Optional[int] = None
- """The maximum requests per day. Only relevant for certain models."""
-
- max_requests_per_1_minute: Optional[int] = None
- """The maximum requests per minute."""
-
- max_tokens_per_1_minute: Optional[int] = None
- """The maximum tokens per minute."""
-
-
-class DataRateLimitUpdated(BaseModel):
- id: Optional[str] = None
- """The rate limit ID"""
-
- changes_requested: Optional[DataRateLimitUpdatedChangesRequested] = None
- """The payload used to update the rate limits."""
-
-
-class DataServiceAccountCreatedData(BaseModel):
- role: Optional[str] = None
- """The role of the service account. Is either `owner` or `member`."""
-
-
-class DataServiceAccountCreated(BaseModel):
- id: Optional[str] = None
- """The service account ID."""
-
- data: Optional[DataServiceAccountCreatedData] = None
- """The payload used to create the service account."""
-
-
-class DataServiceAccountDeleted(BaseModel):
- id: Optional[str] = None
- """The service account ID."""
-
-
-class DataServiceAccountUpdatedChangesRequested(BaseModel):
- role: Optional[str] = None
- """The role of the service account. Is either `owner` or `member`."""
-
-
-class DataServiceAccountUpdated(BaseModel):
- id: Optional[str] = None
- """The service account ID."""
-
- changes_requested: Optional[DataServiceAccountUpdatedChangesRequested] = None
- """The payload used to updated the service account."""
-
-
-class DataUserAddedData(BaseModel):
- role: Optional[str] = None
- """The role of the user. Is either `owner` or `member`."""
-
-
-class DataUserAdded(BaseModel):
- id: Optional[str] = None
- """The user ID."""
-
- data: Optional[DataUserAddedData] = None
- """The payload used to add the user to the project."""
-
-
-class DataUserDeleted(BaseModel):
- id: Optional[str] = None
- """The user ID."""
-
-
-class DataUserUpdatedChangesRequested(BaseModel):
- role: Optional[str] = None
- """The role of the user. Is either `owner` or `member`."""
-
-
-class DataUserUpdated(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
- changes_requested: Optional[DataUserUpdatedChangesRequested] = None
- """The payload used to update the user."""
-
-
-class Data(BaseModel):
- id: str
- """The ID of this log."""
-
- actor: DataActor
- """The actor who performed the audit logged action."""
-
- effective_at: int
- """The Unix timestamp (in seconds) of the event."""
-
- type: AuditLogEventType
- """The event type."""
-
- api_key_created: Optional[DataAPIKeyCreated] = FieldInfo(alias="api_key.created", default=None)
- """The details for events with this `type`."""
-
- api_key_deleted: Optional[DataAPIKeyDeleted] = FieldInfo(alias="api_key.deleted", default=None)
- """The details for events with this `type`."""
-
- api_key_updated: Optional[DataAPIKeyUpdated] = FieldInfo(alias="api_key.updated", default=None)
- """The details for events with this `type`."""
-
- invite_accepted: Optional[DataInviteAccepted] = FieldInfo(alias="invite.accepted", default=None)
- """The details for events with this `type`."""
-
- invite_deleted: Optional[DataInviteDeleted] = FieldInfo(alias="invite.deleted", default=None)
- """The details for events with this `type`."""
-
- invite_sent: Optional[DataInviteSent] = FieldInfo(alias="invite.sent", default=None)
- """The details for events with this `type`."""
-
- login_failed: Optional[DataLoginFailed] = FieldInfo(alias="login.failed", default=None)
- """The details for events with this `type`."""
-
- logout_failed: Optional[DataLogoutFailed] = FieldInfo(alias="logout.failed", default=None)
- """The details for events with this `type`."""
-
- organization_updated: Optional[DataOrganizationUpdated] = FieldInfo(alias="organization.updated", default=None)
- """The details for events with this `type`."""
-
- project: Optional[DataProject] = None
- """The project that the action was scoped to.
-
- Absent for actions not scoped to projects.
- """
-
- project_archived: Optional[DataProjectArchived] = FieldInfo(alias="project.archived", default=None)
- """The details for events with this `type`."""
-
- project_created: Optional[DataProjectCreated] = FieldInfo(alias="project.created", default=None)
- """The details for events with this `type`."""
-
- project_updated: Optional[DataProjectUpdated] = FieldInfo(alias="project.updated", default=None)
- """The details for events with this `type`."""
-
- rate_limit_deleted: Optional[DataRateLimitDeleted] = FieldInfo(alias="rate_limit.deleted", default=None)
- """The details for events with this `type`."""
-
- rate_limit_updated: Optional[DataRateLimitUpdated] = FieldInfo(alias="rate_limit.updated", default=None)
- """The details for events with this `type`."""
-
- service_account_created: Optional[DataServiceAccountCreated] = FieldInfo(
- alias="service_account.created", default=None
- )
- """The details for events with this `type`."""
-
- service_account_deleted: Optional[DataServiceAccountDeleted] = FieldInfo(
- alias="service_account.deleted", default=None
- )
- """The details for events with this `type`."""
-
- service_account_updated: Optional[DataServiceAccountUpdated] = FieldInfo(
- alias="service_account.updated", default=None
- )
- """The details for events with this `type`."""
-
- user_added: Optional[DataUserAdded] = FieldInfo(alias="user.added", default=None)
- """The details for events with this `type`."""
-
- user_deleted: Optional[DataUserDeleted] = FieldInfo(alias="user.deleted", default=None)
- """The details for events with this `type`."""
-
- user_updated: Optional[DataUserUpdated] = FieldInfo(alias="user.updated", default=None)
- """The details for events with this `type`."""
-
-
-class OrganizationListAuditLogsResponse(BaseModel):
- data: List[Data]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/output_message.py b/src/digitalocean_genai_sdk/types/output_message.py
deleted file mode 100644
index 4db6e72e..00000000
--- a/src/digitalocean_genai_sdk/types/output_message.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-
-__all__ = [
- "OutputMessage",
- "Content",
- "ContentOutputText",
- "ContentOutputTextAnnotation",
- "ContentOutputTextAnnotationFileCitation",
- "ContentOutputTextAnnotationURLCitation",
- "ContentOutputTextAnnotationFilePath",
- "ContentRefusal",
-]
-
-
-class ContentOutputTextAnnotationFileCitation(BaseModel):
- file_id: str
- """The ID of the file."""
-
- index: int
- """The index of the file in the list of files."""
-
- type: Literal["file_citation"]
- """The type of the file citation. Always `file_citation`."""
-
-
-class ContentOutputTextAnnotationURLCitation(BaseModel):
- end_index: int
- """The index of the last character of the URL citation in the message."""
-
- start_index: int
- """The index of the first character of the URL citation in the message."""
-
- title: str
- """The title of the web resource."""
-
- type: Literal["url_citation"]
- """The type of the URL citation. Always `url_citation`."""
-
- url: str
- """The URL of the web resource."""
-
-
-class ContentOutputTextAnnotationFilePath(BaseModel):
- file_id: str
- """The ID of the file."""
-
- index: int
- """The index of the file in the list of files."""
-
- type: Literal["file_path"]
- """The type of the file path. Always `file_path`."""
-
-
-ContentOutputTextAnnotation: TypeAlias = Union[
- ContentOutputTextAnnotationFileCitation, ContentOutputTextAnnotationURLCitation, ContentOutputTextAnnotationFilePath
-]
-
-
-class ContentOutputText(BaseModel):
- annotations: List[ContentOutputTextAnnotation]
- """The annotations of the text output."""
-
- text: str
- """The text output from the model."""
-
- type: Literal["output_text"]
- """The type of the output text. Always `output_text`."""
-
-
-class ContentRefusal(BaseModel):
- refusal: str
- """The refusal explanationfrom the model."""
-
- type: Literal["refusal"]
- """The type of the refusal. Always `refusal`."""
-
-
-Content: TypeAlias = Union[ContentOutputText, ContentRefusal]
-
-
-class OutputMessage(BaseModel):
- id: str
- """The unique ID of the output message."""
-
- content: List[Content]
- """The content of the output message."""
-
- role: Literal["assistant"]
- """The role of the output message. Always `assistant`."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
-
- type: Literal["message"]
- """The type of the output message. Always `message`."""
diff --git a/src/digitalocean_genai_sdk/types/output_message_param.py b/src/digitalocean_genai_sdk/types/output_message_param.py
deleted file mode 100644
index 83f13e18..00000000
--- a/src/digitalocean_genai_sdk/types/output_message_param.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = [
- "OutputMessageParam",
- "Content",
- "ContentOutputText",
- "ContentOutputTextAnnotation",
- "ContentOutputTextAnnotationFileCitation",
- "ContentOutputTextAnnotationURLCitation",
- "ContentOutputTextAnnotationFilePath",
- "ContentRefusal",
-]
-
-
-class ContentOutputTextAnnotationFileCitation(TypedDict, total=False):
- file_id: Required[str]
- """The ID of the file."""
-
- index: Required[int]
- """The index of the file in the list of files."""
-
- type: Required[Literal["file_citation"]]
- """The type of the file citation. Always `file_citation`."""
-
-
-class ContentOutputTextAnnotationURLCitation(TypedDict, total=False):
- end_index: Required[int]
- """The index of the last character of the URL citation in the message."""
-
- start_index: Required[int]
- """The index of the first character of the URL citation in the message."""
-
- title: Required[str]
- """The title of the web resource."""
-
- type: Required[Literal["url_citation"]]
- """The type of the URL citation. Always `url_citation`."""
-
- url: Required[str]
- """The URL of the web resource."""
-
-
-class ContentOutputTextAnnotationFilePath(TypedDict, total=False):
- file_id: Required[str]
- """The ID of the file."""
-
- index: Required[int]
- """The index of the file in the list of files."""
-
- type: Required[Literal["file_path"]]
- """The type of the file path. Always `file_path`."""
-
-
-ContentOutputTextAnnotation: TypeAlias = Union[
- ContentOutputTextAnnotationFileCitation, ContentOutputTextAnnotationURLCitation, ContentOutputTextAnnotationFilePath
-]
-
-
-class ContentOutputText(TypedDict, total=False):
- annotations: Required[Iterable[ContentOutputTextAnnotation]]
- """The annotations of the text output."""
-
- text: Required[str]
- """The text output from the model."""
-
- type: Required[Literal["output_text"]]
- """The type of the output text. Always `output_text`."""
-
-
-class ContentRefusal(TypedDict, total=False):
- refusal: Required[str]
- """The refusal explanationfrom the model."""
-
- type: Required[Literal["refusal"]]
- """The type of the refusal. Always `refusal`."""
-
-
-Content: TypeAlias = Union[ContentOutputText, ContentRefusal]
-
-
-class OutputMessageParam(TypedDict, total=False):
- id: Required[str]
- """The unique ID of the output message."""
-
- content: Required[Iterable[Content]]
- """The content of the output message."""
-
- role: Required[Literal["assistant"]]
- """The role of the output message. Always `assistant`."""
-
- status: Required[Literal["in_progress", "completed", "incomplete"]]
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
-
- type: Required[Literal["message"]]
- """The type of the output message. Always `message`."""
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_session_params.py b/src/digitalocean_genai_sdk/types/realtime_create_session_params.py
deleted file mode 100644
index df105bac..00000000
--- a/src/digitalocean_genai_sdk/types/realtime_create_session_params.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal, TypedDict
-
-from .voice_ids_shared_param import VoiceIDsSharedParam
-
-__all__ = [
- "RealtimeCreateSessionParams",
- "InputAudioNoiseReduction",
- "InputAudioTranscription",
- "Tool",
- "TurnDetection",
-]
-
-
-class RealtimeCreateSessionParams(TypedDict, total=False):
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
- """The format of input audio.
-
- Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
- be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
- byte order.
- """
-
- input_audio_noise_reduction: InputAudioNoiseReduction
- """Configuration for input audio noise reduction.
-
- This can be set to `null` to turn off. Noise reduction filters audio added to
- the input audio buffer before it is sent to VAD and the model. Filtering the
- audio can improve VAD and turn detection accuracy (reducing false positives) and
- model performance by improving perception of the input audio.
- """
-
- input_audio_transcription: InputAudioTranscription
- """
- Configuration for input audio transcription, defaults to off and can be set to
- `null` to turn off once on. Input audio transcription is not native to the
- model, since the model consumes audio directly. Transcription runs
- asynchronously through
- [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
- and should be treated as guidance of input audio content rather than precisely
- what the model heard. The client can optionally set the language and prompt for
- transcription, these offer additional guidance to the transcription service.
- """
-
- instructions: str
- """The default system instructions (i.e.
-
- system message) prepended to model calls. This field allows the client to guide
- the model on desired responses. The model can be instructed on response content
- and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
- good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
- into your voice", "laugh frequently"). The instructions are not guaranteed to be
- followed by the model, but they provide guidance to the model on the desired
- behavior.
-
- Note that the server sets default instructions which will be used if this field
- is not set and are visible in the `session.created` event at the start of the
- session.
- """
-
- max_response_output_tokens: Union[int, Literal["inf"]]
- """
- Maximum number of output tokens for a single assistant response, inclusive of
- tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
- `inf` for the maximum available tokens for a given model. Defaults to `inf`.
- """
-
- modalities: List[Literal["text", "audio"]]
- """The set of modalities the model can respond with.
-
- To disable audio, set this to ["text"].
- """
-
- model: Literal[
- "gpt-4o-realtime-preview",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- ]
- """The Realtime model used for this session."""
-
- output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
- """The format of output audio.
-
- Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
- sampled at a rate of 24kHz.
- """
-
- temperature: float
- """Sampling temperature for the model, limited to [0.6, 1.2].
-
- For audio models a temperature of 0.8 is highly recommended for best
- performance.
- """
-
- tool_choice: str
- """How the model chooses tools.
-
- Options are `auto`, `none`, `required`, or specify a function.
- """
-
- tools: Iterable[Tool]
- """Tools (functions) available to the model."""
-
- turn_detection: TurnDetection
- """Configuration for turn detection, ether Server VAD or Semantic VAD.
-
- This can be set to `null` to turn off, in which case the client must manually
- trigger model response. Server VAD means that the model will detect the start
- and end of speech based on audio volume and respond at the end of user speech.
- Semantic VAD is more advanced and uses a turn detection model (in conjuction
- with VAD) to semantically estimate whether the user has finished speaking, then
- dynamically sets a timeout based on this probability. For example, if user audio
- trails off with "uhhm", the model will score a low probability of turn end and
- wait longer for the user to continue speaking. This can be useful for more
- natural conversations, but may have a higher latency.
- """
-
- voice: VoiceIDsSharedParam
- """The voice the model uses to respond.
-
- Voice cannot be changed during the session once the model has responded with
- audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
- `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
- """
-
-
-class InputAudioNoiseReduction(TypedDict, total=False):
- type: Literal["near_field", "far_field"]
- """Type of noise reduction.
-
- `near_field` is for close-talking microphones such as headphones, `far_field` is
- for far-field microphones such as laptop or conference room microphones.
- """
-
-
-class InputAudioTranscription(TypedDict, total=False):
- language: str
- """The language of the input audio.
-
- Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
- """
-
- model: str
- """
- The model to use for transcription, current options are `gpt-4o-transcribe`,
- `gpt-4o-mini-transcribe`, and `whisper-1`.
- """
-
- prompt: str
- """
- An optional text to guide the model's style or continue a previous audio
- segment. For `whisper-1`, the
- [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For
- `gpt-4o-transcribe` models, the prompt is a free text string, for example
- "expect words related to technology".
- """
-
-
-class Tool(TypedDict, total=False):
- description: str
- """
- The description of the function, including guidance on when and how to call it,
- and guidance about what to tell the user when calling (if anything).
- """
-
- name: str
- """The name of the function."""
-
- parameters: object
- """Parameters of the function in JSON Schema."""
-
- type: Literal["function"]
- """The type of the tool, i.e. `function`."""
-
-
-class TurnDetection(TypedDict, total=False):
- create_response: bool
- """
- Whether or not to automatically generate a response when a VAD stop event
- occurs.
- """
-
- eagerness: Literal["low", "medium", "high", "auto"]
- """Used only for `semantic_vad` mode.
-
- The eagerness of the model to respond. `low` will wait longer for the user to
- continue speaking, `high` will respond more quickly. `auto` is the default and
- is equivalent to `medium`.
- """
-
- interrupt_response: bool
- """
- Whether or not to automatically interrupt any ongoing response with output to
- the default conversation (i.e. `conversation` of `auto`) when a VAD start event
- occurs.
- """
-
- prefix_padding_ms: int
- """Used only for `server_vad` mode.
-
- Amount of audio to include before the VAD detected speech (in milliseconds).
- Defaults to 300ms.
- """
-
- silence_duration_ms: int
- """Used only for `server_vad` mode.
-
- Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
- With shorter values the model will respond more quickly, but may jump in on
- short pauses from the user.
- """
-
- threshold: float
- """Used only for `server_vad` mode.
-
- Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
- threshold will require louder audio to activate the model, and thus might
- perform better in noisy environments.
- """
-
- type: Literal["server_vad", "semantic_vad"]
- """Type of turn detection."""
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_session_response.py b/src/digitalocean_genai_sdk/types/realtime_create_session_response.py
deleted file mode 100644
index 1b7bc03c..00000000
--- a/src/digitalocean_genai_sdk/types/realtime_create_session_response.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .voice_ids_shared import VoiceIDsShared
-
-__all__ = ["RealtimeCreateSessionResponse", "ClientSecret", "InputAudioTranscription", "Tool", "TurnDetection"]
-
-
-class ClientSecret(BaseModel):
- expires_at: int
- """Timestamp for when the token expires.
-
- Currently, all tokens expire after one minute.
- """
-
- value: str
- """
- Ephemeral key usable in client environments to authenticate connections to the
- Realtime API. Use this in client-side environments rather than a standard API
- token, which should only be used server-side.
- """
-
-
-class InputAudioTranscription(BaseModel):
- model: Optional[str] = None
- """
- The model to use for transcription, `whisper-1` is the only currently supported
- model.
- """
-
-
-class Tool(BaseModel):
- description: Optional[str] = None
- """
- The description of the function, including guidance on when and how to call it,
- and guidance about what to tell the user when calling (if anything).
- """
-
- name: Optional[str] = None
- """The name of the function."""
-
- parameters: Optional[object] = None
- """Parameters of the function in JSON Schema."""
-
- type: Optional[Literal["function"]] = None
- """The type of the tool, i.e. `function`."""
-
-
-class TurnDetection(BaseModel):
- prefix_padding_ms: Optional[int] = None
- """Amount of audio to include before the VAD detected speech (in milliseconds).
-
- Defaults to 300ms.
- """
-
- silence_duration_ms: Optional[int] = None
- """Duration of silence to detect speech stop (in milliseconds).
-
- Defaults to 500ms. With shorter values the model will respond more quickly, but
- may jump in on short pauses from the user.
- """
-
- threshold: Optional[float] = None
- """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
-
- A higher threshold will require louder audio to activate the model, and thus
- might perform better in noisy environments.
- """
-
- type: Optional[str] = None
- """Type of turn detection, only `server_vad` is currently supported."""
-
-
-class RealtimeCreateSessionResponse(BaseModel):
- client_secret: ClientSecret
- """Ephemeral key returned by the API."""
-
- input_audio_format: Optional[str] = None
- """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
-
- input_audio_transcription: Optional[InputAudioTranscription] = None
- """
- Configuration for input audio transcription, defaults to off and can be set to
- `null` to turn off once on. Input audio transcription is not native to the
- model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
- """
-
- instructions: Optional[str] = None
- """The default system instructions (i.e.
-
- system message) prepended to model calls. This field allows the client to guide
- the model on desired responses. The model can be instructed on response content
- and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
- good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
- into your voice", "laugh frequently"). The instructions are not guaranteed to be
- followed by the model, but they provide guidance to the model on the desired
- behavior.
-
- Note that the server sets default instructions which will be used if this field
- is not set and are visible in the `session.created` event at the start of the
- session.
- """
-
- max_response_output_tokens: Union[int, Literal["inf"], None] = None
- """
- Maximum number of output tokens for a single assistant response, inclusive of
- tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
- `inf` for the maximum available tokens for a given model. Defaults to `inf`.
- """
-
- modalities: Optional[List[Literal["text", "audio"]]] = None
- """The set of modalities the model can respond with.
-
- To disable audio, set this to ["text"].
- """
-
- output_audio_format: Optional[str] = None
- """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
-
- temperature: Optional[float] = None
- """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
-
- tool_choice: Optional[str] = None
- """How the model chooses tools.
-
- Options are `auto`, `none`, `required`, or specify a function.
- """
-
- tools: Optional[List[Tool]] = None
- """Tools (functions) available to the model."""
-
- turn_detection: Optional[TurnDetection] = None
- """Configuration for turn detection.
-
- Can be set to `null` to turn off. Server VAD means that the model will detect
- the start and end of speech based on audio volume and respond at the end of user
- speech.
- """
-
- voice: Optional[VoiceIDsShared] = None
- """The voice the model uses to respond.
-
- Voice cannot be changed during the session once the model has responded with
- audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
- `coral`, `echo` `sage`, `shimmer` and `verse`.
- """
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py
deleted file mode 100644
index 21912679..00000000
--- a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, TypedDict
-
-__all__ = [
- "RealtimeCreateTranscriptionSessionParams",
- "InputAudioNoiseReduction",
- "InputAudioTranscription",
- "TurnDetection",
-]
-
-
-class RealtimeCreateTranscriptionSessionParams(TypedDict, total=False):
- include: List[str]
- """The set of items to include in the transcription. Current available items are:
-
- - `item.input_audio_transcription.logprobs`
- """
-
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
- """The format of input audio.
-
- Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
- be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
- byte order.
- """
-
- input_audio_noise_reduction: InputAudioNoiseReduction
- """Configuration for input audio noise reduction.
-
- This can be set to `null` to turn off. Noise reduction filters audio added to
- the input audio buffer before it is sent to VAD and the model. Filtering the
- audio can improve VAD and turn detection accuracy (reducing false positives) and
- model performance by improving perception of the input audio.
- """
-
- input_audio_transcription: InputAudioTranscription
- """Configuration for input audio transcription.
-
- The client can optionally set the language and prompt for transcription, these
- offer additional guidance to the transcription service.
- """
-
- modalities: List[Literal["text", "audio"]]
- """The set of modalities the model can respond with.
-
- To disable audio, set this to ["text"].
- """
-
- turn_detection: TurnDetection
- """Configuration for turn detection, ether Server VAD or Semantic VAD.
-
- This can be set to `null` to turn off, in which case the client must manually
- trigger model response. Server VAD means that the model will detect the start
- and end of speech based on audio volume and respond at the end of user speech.
- Semantic VAD is more advanced and uses a turn detection model (in conjuction
- with VAD) to semantically estimate whether the user has finished speaking, then
- dynamically sets a timeout based on this probability. For example, if user audio
- trails off with "uhhm", the model will score a low probability of turn end and
- wait longer for the user to continue speaking. This can be useful for more
- natural conversations, but may have a higher latency.
- """
-
-
-class InputAudioNoiseReduction(TypedDict, total=False):
- type: Literal["near_field", "far_field"]
- """Type of noise reduction.
-
- `near_field` is for close-talking microphones such as headphones, `far_field` is
- for far-field microphones such as laptop or conference room microphones.
- """
-
-
-class InputAudioTranscription(TypedDict, total=False):
- language: str
- """The language of the input audio.
-
- Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
- """
-
- model: Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]
- """
- The model to use for transcription, current options are `gpt-4o-transcribe`,
- `gpt-4o-mini-transcribe`, and `whisper-1`.
- """
-
- prompt: str
- """
- An optional text to guide the model's style or continue a previous audio
- segment. For `whisper-1`, the
- [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For
- `gpt-4o-transcribe` models, the prompt is a free text string, for example
- "expect words related to technology".
- """
-
-
-class TurnDetection(TypedDict, total=False):
- create_response: bool
- """Whether or not to automatically generate a response when a VAD stop event
- occurs.
-
- Not available for transcription sessions.
- """
-
- eagerness: Literal["low", "medium", "high", "auto"]
- """Used only for `semantic_vad` mode.
-
- The eagerness of the model to respond. `low` will wait longer for the user to
- continue speaking, `high` will respond more quickly. `auto` is the default and
- is equivalent to `medium`.
- """
-
- interrupt_response: bool
- """
- Whether or not to automatically interrupt any ongoing response with output to
- the default conversation (i.e. `conversation` of `auto`) when a VAD start event
- occurs. Not available for transcription sessions.
- """
-
- prefix_padding_ms: int
- """Used only for `server_vad` mode.
-
- Amount of audio to include before the VAD detected speech (in milliseconds).
- Defaults to 300ms.
- """
-
- silence_duration_ms: int
- """Used only for `server_vad` mode.
-
- Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
- With shorter values the model will respond more quickly, but may jump in on
- short pauses from the user.
- """
-
- threshold: float
- """Used only for `server_vad` mode.
-
- Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
- threshold will require louder audio to activate the model, and thus might
- perform better in noisy environments.
- """
-
- type: Literal["server_vad", "semantic_vad"]
- """Type of turn detection."""
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py
deleted file mode 100644
index bbd0b9de..00000000
--- a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["RealtimeCreateTranscriptionSessionResponse", "ClientSecret", "InputAudioTranscription", "TurnDetection"]
-
-
-class ClientSecret(BaseModel):
- expires_at: int
- """Timestamp for when the token expires.
-
- Currently, all tokens expire after one minute.
- """
-
- value: str
- """
- Ephemeral key usable in client environments to authenticate connections to the
- Realtime API. Use this in client-side environments rather than a standard API
- token, which should only be used server-side.
- """
-
-
-class InputAudioTranscription(BaseModel):
- language: Optional[str] = None
- """The language of the input audio.
-
- Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
- """
-
- model: Optional[Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]] = None
- """The model to use for transcription.
-
- Can be `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, or `whisper-1`.
- """
-
- prompt: Optional[str] = None
- """An optional text to guide the model's style or continue a previous audio
- segment.
-
- The [prompt](/docs/guides/speech-to-text#prompting) should match the audio
- language.
- """
-
-
-class TurnDetection(BaseModel):
- prefix_padding_ms: Optional[int] = None
- """Amount of audio to include before the VAD detected speech (in milliseconds).
-
- Defaults to 300ms.
- """
-
- silence_duration_ms: Optional[int] = None
- """Duration of silence to detect speech stop (in milliseconds).
-
- Defaults to 500ms. With shorter values the model will respond more quickly, but
- may jump in on short pauses from the user.
- """
-
- threshold: Optional[float] = None
- """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
-
- A higher threshold will require louder audio to activate the model, and thus
- might perform better in noisy environments.
- """
-
- type: Optional[str] = None
- """Type of turn detection, only `server_vad` is currently supported."""
-
-
-class RealtimeCreateTranscriptionSessionResponse(BaseModel):
- client_secret: ClientSecret
- """Ephemeral key returned by the API.
-
- Only present when the session is created on the server via REST API.
- """
-
- input_audio_format: Optional[str] = None
- """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
-
- input_audio_transcription: Optional[InputAudioTranscription] = None
- """Configuration of the transcription model."""
-
- modalities: Optional[List[Literal["text", "audio"]]] = None
- """The set of modalities the model can respond with.
-
- To disable audio, set this to ["text"].
- """
-
- turn_detection: Optional[TurnDetection] = None
- """Configuration for turn detection.
-
- Can be set to `null` to turn off. Server VAD means that the model will detect
- the start and end of speech based on audio volume and respond at the end of user
- speech.
- """
diff --git a/src/digitalocean_genai_sdk/types/reasoning_effort.py b/src/digitalocean_genai_sdk/types/reasoning_effort.py
deleted file mode 100644
index ace21b67..00000000
--- a/src/digitalocean_genai_sdk/types/reasoning_effort.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["ReasoningEffort"]
-
-ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
diff --git a/src/digitalocean_genai_sdk/types/reasoning_item.py b/src/digitalocean_genai_sdk/types/reasoning_item.py
deleted file mode 100644
index 28a64183..00000000
--- a/src/digitalocean_genai_sdk/types/reasoning_item.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ReasoningItem", "Summary"]
-
-
-class Summary(BaseModel):
- text: str
- """
- A short summary of the reasoning used by the model when generating the response.
- """
-
- type: Literal["summary_text"]
- """The type of the object. Always `summary_text`."""
-
-
-class ReasoningItem(BaseModel):
- id: str
- """The unique identifier of the reasoning content."""
-
- summary: List[Summary]
- """Reasoning text contents."""
-
- type: Literal["reasoning"]
- """The type of the object. Always `reasoning`."""
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/reasoning_item_param.py b/src/digitalocean_genai_sdk/types/reasoning_item_param.py
deleted file mode 100644
index 4d2a0504..00000000
--- a/src/digitalocean_genai_sdk/types/reasoning_item_param.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ReasoningItemParam", "Summary"]
-
-
-class Summary(TypedDict, total=False):
- text: Required[str]
- """
- A short summary of the reasoning used by the model when generating the response.
- """
-
- type: Required[Literal["summary_text"]]
- """The type of the object. Always `summary_text`."""
-
-
-class ReasoningItemParam(TypedDict, total=False):
- id: Required[str]
- """The unique identifier of the reasoning content."""
-
- summary: Required[Iterable[Summary]]
- """Reasoning text contents."""
-
- type: Required[Literal["reasoning"]]
- """The type of the object. Always `reasoning`."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/response.py b/src/digitalocean_genai_sdk/types/response.py
deleted file mode 100644
index 523eedfc..00000000
--- a/src/digitalocean_genai_sdk/types/response.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-from .output_message import OutputMessage
-from .reasoning_item import ReasoningItem
-from .computer_tool_call import ComputerToolCall
-from .function_tool_call import FunctionToolCall
-from .response_properties import ResponseProperties
-from .web_search_tool_call import WebSearchToolCall
-from .file_search_tool_call import FileSearchToolCall
-from .model_response_properties import ModelResponseProperties
-
-__all__ = [
- "Response",
- "ResponseError",
- "ResponseIncompleteDetails",
- "ResponseOutput",
- "ResponseUsage",
- "ResponseUsageInputTokensDetails",
- "ResponseUsageOutputTokensDetails",
-]
-
-
-class ResponseError(BaseModel):
- code: Literal[
- "server_error",
- "rate_limit_exceeded",
- "invalid_prompt",
- "vector_store_timeout",
- "invalid_image",
- "invalid_image_format",
- "invalid_base64_image",
- "invalid_image_url",
- "image_too_large",
- "image_too_small",
- "image_parse_error",
- "image_content_policy_violation",
- "invalid_image_mode",
- "image_file_too_large",
- "unsupported_image_media_type",
- "empty_image_file",
- "failed_to_download_image",
- "image_file_not_found",
- ]
- """The error code for the response."""
-
- message: str
- """A human-readable description of the error."""
-
-
-class ResponseIncompleteDetails(BaseModel):
- reason: Optional[Literal["max_output_tokens", "content_filter"]] = None
- """The reason why the response is incomplete."""
-
-
-ResponseOutput: TypeAlias = Annotated[
- Union[OutputMessage, FileSearchToolCall, FunctionToolCall, WebSearchToolCall, ComputerToolCall, ReasoningItem],
- PropertyInfo(discriminator="type"),
-]
-
-
-class ResponseUsageInputTokensDetails(BaseModel):
- cached_tokens: int
- """The number of tokens that were retrieved from the cache.
-
- [More on prompt caching](/docs/guides/prompt-caching).
- """
-
-
-class ResponseUsageOutputTokensDetails(BaseModel):
- reasoning_tokens: int
- """The number of reasoning tokens."""
-
-
-class ResponseUsage(BaseModel):
- input_tokens: int
- """The number of input tokens."""
-
- input_tokens_details: ResponseUsageInputTokensDetails
- """A detailed breakdown of the input tokens."""
-
- output_tokens: int
- """The number of output tokens."""
-
- output_tokens_details: ResponseUsageOutputTokensDetails
- """A detailed breakdown of the output tokens."""
-
- total_tokens: int
- """The total number of tokens used."""
-
-
-class Response(ModelResponseProperties, ResponseProperties):
- id: str
- """Unique identifier for this Response."""
-
- created_at: float
- """Unix timestamp (in seconds) of when this Response was created."""
-
- error: Optional[ResponseError] = None
- """An error object returned when the model fails to generate a Response."""
-
- incomplete_details: Optional[ResponseIncompleteDetails] = None
- """Details about why the response is incomplete."""
-
- object: Literal["response"]
- """The object type of this resource - always set to `response`."""
-
- output: List[ResponseOutput]
- """An array of content items generated by the model.
-
- - The length and order of items in the `output` array is dependent on the
- model's response.
- - Rather than accessing the first item in the `output` array and assuming it's
- an `assistant` message with the content generated by the model, you might
- consider using the `output_text` property where supported in SDKs.
- """
-
- parallel_tool_calls: bool
- """Whether to allow the model to run tool calls in parallel."""
-
- output_text: Optional[str] = None
- """
- SDK-only convenience property that contains the aggregated text output from all
- `output_text` items in the `output` array, if any are present. Supported in the
- Python and JavaScript SDKs.
- """
-
- status: Optional[Literal["completed", "failed", "in_progress", "incomplete"]] = None
- """The status of the response generation.
-
- One of `completed`, `failed`, `in_progress`, or `incomplete`.
- """
-
- usage: Optional[ResponseUsage] = None
- """
- Represents token usage details including input tokens, output tokens, a
- breakdown of output tokens, and the total tokens used.
- """
diff --git a/src/digitalocean_genai_sdk/types/response_create_params.py b/src/digitalocean_genai_sdk/types/response_create_params.py
deleted file mode 100644
index 878e53a5..00000000
--- a/src/digitalocean_genai_sdk/types/response_create_params.py
+++ /dev/null
@@ -1,494 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .includable import Includable
-from .reasoning_effort import ReasoningEffort
-from .input_content_param import InputContentParam
-from .input_message_param import InputMessageParam
-from .output_message_param import OutputMessageParam
-from .reasoning_item_param import ReasoningItemParam
-from .compound_filter_param import CompoundFilterParam
-from .comparison_filter_param import ComparisonFilterParam
-from .computer_tool_call_param import ComputerToolCallParam
-from .function_tool_call_param import FunctionToolCallParam
-from .web_search_tool_call_param import WebSearchToolCallParam
-from .file_search_tool_call_param import FileSearchToolCallParam
-from .chat.web_search_context_size import WebSearchContextSize
-from .chat.web_search_location_param import WebSearchLocationParam
-from .chat.response_format_text_param import ResponseFormatTextParam
-from .computer_tool_call_output_param import ComputerToolCallOutputParam
-from .function_tool_call_output_param import FunctionToolCallOutputParam
-from .chat.response_format_json_object_param import ResponseFormatJsonObjectParam
-
-__all__ = [
- "ResponseCreateParams",
- "InputInputItemList",
- "InputInputItemListMessage",
- "InputInputItemListItemReference",
- "Reasoning",
- "Text",
- "TextFormat",
- "TextFormatTextResponseFormatJsonSchema",
- "ToolChoice",
- "ToolChoiceToolChoiceTypes",
- "ToolChoiceToolChoiceFunction",
- "Tool",
- "ToolFileSearchTool",
- "ToolFileSearchToolFilters",
- "ToolFileSearchToolRankingOptions",
- "ToolFunctionTool",
- "ToolComputerTool",
- "ToolWebSearchTool",
- "ToolWebSearchToolUserLocation",
-]
-
-
-class ResponseCreateParams(TypedDict, total=False):
- input: Required[Union[str, Iterable[InputInputItemList]]]
- """Text, image, or file inputs to the model, used to generate a response.
-
- Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Image inputs](/docs/guides/images)
- - [File inputs](/docs/guides/pdf-files)
- - [Conversation state](/docs/guides/conversation-state)
- - [Function calling](/docs/guides/function-calling)
- """
-
- model: Required[
- Union[
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- "o1-pro",
- "o1-pro-2025-03-19",
- "computer-use-preview",
- "computer-use-preview-2025-03-11",
- ],
- str,
- ]
- ]
- """Model ID used to generate the response, like `gpt-4o` or `o1`.
-
- OpenAI offers a wide range of models with different capabilities, performance
- characteristics, and price points. Refer to the [model guide](/docs/models) to
- browse and compare available models.
- """
-
- include: Optional[List[Includable]]
- """Specify additional output data to include in the model response.
-
- Currently supported values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
- """
-
- instructions: Optional[str]
- """
- Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will be not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
- """
-
- max_output_tokens: Optional[int]
- """
- An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- parallel_tool_calls: Optional[bool]
- """Whether to allow the model to run tool calls in parallel."""
-
- previous_response_id: Optional[str]
- """The unique ID of the previous response to the model.
-
- Use this to create multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
- """
-
- reasoning: Optional[Reasoning]
- """**o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- """
-
- store: Optional[bool]
- """Whether to store the generated model response for later retrieval via API."""
-
- stream: Optional[bool]
- """
- If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/responses-streaming) for
- more information.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. We generally recommend altering
- this or `top_p` but not both.
- """
-
- text: Text
- """Configuration options for a text response from the model.
-
- Can be plain text or structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
- """
-
- tool_choice: ToolChoice
- """
- How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
- """
-
- tools: Iterable[Tool]
- """An array of tools the model may call while generating a response.
-
- You can specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- """
-
- truncation: Optional[Literal["auto", "disabled"]]
- """The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
-
-
-class InputInputItemListMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[InputContentParam]]]
- """
- Text, image, or audio input to the model, used to generate a response. Can also
- contain previous assistant responses.
- """
-
- role: Required[Literal["user", "assistant", "system", "developer"]]
- """The role of the message input.
-
- One of `user`, `assistant`, `system`, or `developer`.
- """
-
- type: Literal["message"]
- """The type of the message input. Always `message`."""
-
-
-class InputInputItemListItemReference(TypedDict, total=False):
- id: Required[str]
- """The ID of the item to reference."""
-
- type: Required[Literal["item_reference"]]
- """The type of item to reference. Always `item_reference`."""
-
-
-InputInputItemList: TypeAlias = Union[
- InputInputItemListMessage,
- InputMessageParam,
- OutputMessageParam,
- FileSearchToolCallParam,
- ComputerToolCallParam,
- ComputerToolCallOutputParam,
- WebSearchToolCallParam,
- FunctionToolCallParam,
- FunctionToolCallOutputParam,
- ReasoningItemParam,
- InputInputItemListItemReference,
-]
-
-
-class Reasoning(TypedDict, total=False):
- effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- generate_summary: Optional[Literal["concise", "detailed"]]
- """**computer_use_preview only**
-
- A summary of the reasoning performed by the model. This can be useful for
- debugging and understanding the model's reasoning process. One of `concise` or
- `detailed`.
- """
-
-
-class TextFormatTextResponseFormatJsonSchema(TypedDict, total=False):
- schema: Required[Dict[str, object]]
- """
- The schema for the response format, described as a JSON Schema object. Learn how
- to build JSON schemas [here](https://json-schema.org/).
- """
-
- type: Required[Literal["json_schema"]]
- """The type of response format being defined. Always `json_schema`."""
-
- description: str
- """
- A description of what the response format is for, used by the model to determine
- how to respond in the format.
- """
-
- name: str
- """The name of the response format.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- strict: Optional[bool]
- """
- Whether to enable strict schema adherence when generating the output. If set to
- true, the model will always follow the exact schema defined in the `schema`
- field. Only a subset of JSON Schema is supported when `strict` is `true`. To
- learn more, read the
- [Structured Outputs guide](/docs/guides/structured-outputs).
- """
-
-
-TextFormat: TypeAlias = Union[
- ResponseFormatTextParam, TextFormatTextResponseFormatJsonSchema, ResponseFormatJsonObjectParam
-]
-
-
-class Text(TypedDict, total=False):
- format: TextFormat
- """An object specifying the format that the model must output.
-
- Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
- ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](/docs/guides/structured-outputs).
-
- The default format is `{ "type": "text" }` with no additional options.
-
- **Not recommended for gpt-4o and newer models:**
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
- """
-
-
-class ToolChoiceToolChoiceTypes(TypedDict, total=False):
- type: Required[
- Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
- ]
- """The type of hosted tool the model should to use.
-
- Learn more about [built-in tools](/docs/guides/tools).
-
- Allowed values are:
-
- - `file_search`
- - `web_search_preview`
- - `computer_use_preview`
- """
-
-
-class ToolChoiceToolChoiceFunction(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
- type: Required[Literal["function"]]
- """For function calling, the type is always `function`."""
-
-
-ToolChoice: TypeAlias = Union[
- Literal["none", "auto", "required"], ToolChoiceToolChoiceTypes, ToolChoiceToolChoiceFunction
-]
-
-ToolFileSearchToolFilters: TypeAlias = Union[ComparisonFilterParam, CompoundFilterParam]
-
-
-class ToolFileSearchToolRankingOptions(TypedDict, total=False):
- ranker: Literal["auto", "default-2024-11-15"]
- """The ranker to use for the file search."""
-
- score_threshold: float
- """
- The score threshold for the file search, a number between 0 and 1. Numbers
- closer to 1 will attempt to return only the most relevant results, but may
- return fewer results.
- """
-
-
-class ToolFileSearchTool(TypedDict, total=False):
- type: Required[Literal["file_search"]]
- """The type of the file search tool. Always `file_search`."""
-
- vector_store_ids: Required[List[str]]
- """The IDs of the vector stores to search."""
-
- filters: ToolFileSearchToolFilters
- """A filter to apply based on file attributes."""
-
- max_num_results: int
- """The maximum number of results to return.
-
- This number should be between 1 and 50 inclusive.
- """
-
- ranking_options: ToolFileSearchToolRankingOptions
- """Ranking options for search."""
-
-
-class ToolFunctionTool(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
- parameters: Required[Dict[str, object]]
- """A JSON schema object describing the parameters of the function."""
-
- strict: Required[bool]
- """Whether to enforce strict parameter validation. Default `true`."""
-
- type: Required[Literal["function"]]
- """The type of the function tool. Always `function`."""
-
- description: Optional[str]
- """A description of the function.
-
- Used by the model to determine whether or not to call the function.
- """
-
-
-class ToolComputerTool(TypedDict, total=False):
- display_height: Required[float]
- """The height of the computer display."""
-
- display_width: Required[float]
- """The width of the computer display."""
-
- environment: Required[Literal["mac", "windows", "ubuntu", "browser"]]
- """The type of computer environment to control."""
-
- type: Required[Literal["computer_use_preview"]]
- """The type of the computer use tool. Always `computer_use_preview`."""
-
-
-class ToolWebSearchToolUserLocation(WebSearchLocationParam, total=False):
- type: Required[Literal["approximate"]]
- """The type of location approximation. Always `approximate`."""
-
-
-class ToolWebSearchTool(TypedDict, total=False):
- type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]]
- """The type of the web search tool. One of:
-
- - `web_search_preview`
- - `web_search_preview_2025_03_11`
- """
-
- search_context_size: WebSearchContextSize
- """
- High level guidance for the amount of context window space to use for the
- search. One of `low`, `medium`, or `high`. `medium` is the default.
- """
-
- user_location: Optional[ToolWebSearchToolUserLocation]
- """Approximate location parameters for the search."""
-
-
-Tool: TypeAlias = Union[ToolFileSearchTool, ToolFunctionTool, ToolComputerTool, ToolWebSearchTool]
diff --git a/src/digitalocean_genai_sdk/types/response_list_input_items_params.py b/src/digitalocean_genai_sdk/types/response_list_input_items_params.py
deleted file mode 100644
index cba0c8b8..00000000
--- a/src/digitalocean_genai_sdk/types/response_list_input_items_params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["ResponseListInputItemsParams"]
-
-
-class ResponseListInputItemsParams(TypedDict, total=False):
- after: str
- """An item ID to list items after, used in pagination."""
-
- before: str
- """An item ID to list items before, used in pagination."""
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """The order to return the input items in. Default is `asc`.
-
- - `asc`: Return the input items in ascending order.
- - `desc`: Return the input items in descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/response_list_input_items_response.py b/src/digitalocean_genai_sdk/types/response_list_input_items_response.py
deleted file mode 100644
index 95f4555e..00000000
--- a/src/digitalocean_genai_sdk/types/response_list_input_items_response.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-from .input_message import InputMessage
-from .output_message import OutputMessage
-from .computer_tool_call import ComputerToolCall
-from .function_tool_call import FunctionToolCall
-from .web_search_tool_call import WebSearchToolCall
-from .file_search_tool_call import FileSearchToolCall
-from .computer_tool_call_output import ComputerToolCallOutput
-from .function_tool_call_output import FunctionToolCallOutput
-
-__all__ = [
- "ResponseListInputItemsResponse",
- "Data",
- "DataMessage",
- "DataComputerCallOutput",
- "DataFunctionCall",
- "DataFunctionCallOutput",
-]
-
-
-class DataMessage(InputMessage):
- id: str
- """The unique ID of the message input."""
-
-
-class DataComputerCallOutput(ComputerToolCallOutput):
- id: str # type: ignore
- """The unique ID of the computer call tool output."""
-
-
-class DataFunctionCall(FunctionToolCall):
- id: str # type: ignore
- """The unique ID of the function tool call."""
-
-
-class DataFunctionCallOutput(FunctionToolCallOutput):
- id: str # type: ignore
- """The unique ID of the function call tool output."""
-
-
-Data: TypeAlias = Annotated[
- Union[
- DataMessage,
- OutputMessage,
- FileSearchToolCall,
- ComputerToolCall,
- DataComputerCallOutput,
- WebSearchToolCall,
- DataFunctionCall,
- DataFunctionCallOutput,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class ResponseListInputItemsResponse(BaseModel):
- data: List[Data]
- """A list of items used to generate this response."""
-
- first_id: str
- """The ID of the first item in the list."""
-
- has_more: bool
- """Whether there are more items available."""
-
- last_id: str
- """The ID of the last item in the list."""
-
- object: Literal["list"]
- """The type of object returned, must be `list`."""
diff --git a/src/digitalocean_genai_sdk/types/response_properties.py b/src/digitalocean_genai_sdk/types/response_properties.py
deleted file mode 100644
index 84746be5..00000000
--- a/src/digitalocean_genai_sdk/types/response_properties.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-from .compound_filter import CompoundFilter
-from .reasoning_effort import ReasoningEffort
-from .comparison_filter import ComparisonFilter
-from .chat.web_search_location import WebSearchLocation
-from .chat.response_format_text import ResponseFormatText
-from .chat.web_search_context_size import WebSearchContextSize
-from .chat.response_format_json_object import ResponseFormatJsonObject
-
-__all__ = [
- "ResponseProperties",
- "Reasoning",
- "Text",
- "TextFormat",
- "TextFormatTextResponseFormatJsonSchema",
- "ToolChoice",
- "ToolChoiceToolChoiceTypes",
- "ToolChoiceToolChoiceFunction",
- "Tool",
- "ToolFileSearchTool",
- "ToolFileSearchToolFilters",
- "ToolFileSearchToolRankingOptions",
- "ToolFunctionTool",
- "ToolComputerTool",
- "ToolWebSearchTool",
- "ToolWebSearchToolUserLocation",
-]
-
-
-class Reasoning(BaseModel):
- effort: Optional[ReasoningEffort] = None
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- generate_summary: Optional[Literal["concise", "detailed"]] = None
- """**computer_use_preview only**
-
- A summary of the reasoning performed by the model. This can be useful for
- debugging and understanding the model's reasoning process. One of `concise` or
- `detailed`.
- """
-
-
-class TextFormatTextResponseFormatJsonSchema(BaseModel):
- schema_: Dict[str, object] = FieldInfo(alias="schema")
- """
- The schema for the response format, described as a JSON Schema object. Learn how
- to build JSON schemas [here](https://json-schema.org/).
- """
-
- type: Literal["json_schema"]
- """The type of response format being defined. Always `json_schema`."""
-
- description: Optional[str] = None
- """
- A description of what the response format is for, used by the model to determine
- how to respond in the format.
- """
-
- name: Optional[str] = None
- """The name of the response format.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- strict: Optional[bool] = None
- """
- Whether to enable strict schema adherence when generating the output. If set to
- true, the model will always follow the exact schema defined in the `schema`
- field. Only a subset of JSON Schema is supported when `strict` is `true`. To
- learn more, read the
- [Structured Outputs guide](/docs/guides/structured-outputs).
- """
-
-
-TextFormat: TypeAlias = Union[ResponseFormatText, TextFormatTextResponseFormatJsonSchema, ResponseFormatJsonObject]
-
-
-class Text(BaseModel):
- format: Optional[TextFormat] = None
- """An object specifying the format that the model must output.
-
- Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
- ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](/docs/guides/structured-outputs).
-
- The default format is `{ "type": "text" }` with no additional options.
-
- **Not recommended for gpt-4o and newer models:**
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
- """
-
-
-class ToolChoiceToolChoiceTypes(BaseModel):
- type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
- """The type of hosted tool the model should to use.
-
- Learn more about [built-in tools](/docs/guides/tools).
-
- Allowed values are:
-
- - `file_search`
- - `web_search_preview`
- - `computer_use_preview`
- """
-
-
-class ToolChoiceToolChoiceFunction(BaseModel):
- name: str
- """The name of the function to call."""
-
- type: Literal["function"]
- """For function calling, the type is always `function`."""
-
-
-ToolChoice: TypeAlias = Union[
- Literal["none", "auto", "required"], ToolChoiceToolChoiceTypes, ToolChoiceToolChoiceFunction
-]
-
-ToolFileSearchToolFilters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
-
-
-class ToolFileSearchToolRankingOptions(BaseModel):
- ranker: Optional[Literal["auto", "default-2024-11-15"]] = None
- """The ranker to use for the file search."""
-
- score_threshold: Optional[float] = None
- """
- The score threshold for the file search, a number between 0 and 1. Numbers
- closer to 1 will attempt to return only the most relevant results, but may
- return fewer results.
- """
-
-
-class ToolFileSearchTool(BaseModel):
- type: Literal["file_search"]
- """The type of the file search tool. Always `file_search`."""
-
- vector_store_ids: List[str]
- """The IDs of the vector stores to search."""
-
- filters: Optional[ToolFileSearchToolFilters] = None
- """A filter to apply based on file attributes."""
-
- max_num_results: Optional[int] = None
- """The maximum number of results to return.
-
- This number should be between 1 and 50 inclusive.
- """
-
- ranking_options: Optional[ToolFileSearchToolRankingOptions] = None
- """Ranking options for search."""
-
-
-class ToolFunctionTool(BaseModel):
- name: str
- """The name of the function to call."""
-
- parameters: Dict[str, object]
- """A JSON schema object describing the parameters of the function."""
-
- strict: bool
- """Whether to enforce strict parameter validation. Default `true`."""
-
- type: Literal["function"]
- """The type of the function tool. Always `function`."""
-
- description: Optional[str] = None
- """A description of the function.
-
- Used by the model to determine whether or not to call the function.
- """
-
-
-class ToolComputerTool(BaseModel):
- display_height: float
- """The height of the computer display."""
-
- display_width: float
- """The width of the computer display."""
-
- environment: Literal["mac", "windows", "ubuntu", "browser"]
- """The type of computer environment to control."""
-
- type: Literal["computer_use_preview"]
- """The type of the computer use tool. Always `computer_use_preview`."""
-
-
-class ToolWebSearchToolUserLocation(WebSearchLocation):
- type: Literal["approximate"]
- """The type of location approximation. Always `approximate`."""
-
-
-class ToolWebSearchTool(BaseModel):
- type: Literal["web_search_preview", "web_search_preview_2025_03_11"]
- """The type of the web search tool. One of:
-
- - `web_search_preview`
- - `web_search_preview_2025_03_11`
- """
-
- search_context_size: Optional[WebSearchContextSize] = None
- """
- High level guidance for the amount of context window space to use for the
- search. One of `low`, `medium`, or `high`. `medium` is the default.
- """
-
- user_location: Optional[ToolWebSearchToolUserLocation] = None
- """Approximate location parameters for the search."""
-
-
-Tool: TypeAlias = Union[ToolFileSearchTool, ToolFunctionTool, ToolComputerTool, ToolWebSearchTool]
-
-
-class ResponseProperties(BaseModel):
- instructions: Optional[str] = None
- """
- Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will be not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
- """
-
- max_output_tokens: Optional[int] = None
- """
- An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
- """
-
- model: Union[
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- "o1-pro",
- "o1-pro-2025-03-19",
- "computer-use-preview",
- "computer-use-preview-2025-03-11",
- ],
- str,
- None,
- ] = None
- """Model ID used to generate the response, like `gpt-4o` or `o1`.
-
- OpenAI offers a wide range of models with different capabilities, performance
- characteristics, and price points. Refer to the [model guide](/docs/models) to
- browse and compare available models.
- """
-
- previous_response_id: Optional[str] = None
- """The unique ID of the previous response to the model.
-
- Use this to create multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
- """
-
- reasoning: Optional[Reasoning] = None
- """**o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- """
-
- text: Optional[Text] = None
- """Configuration options for a text response from the model.
-
- Can be plain text or structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
- """
-
- tool_choice: Optional[ToolChoice] = None
- """
- How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
- """
-
- tools: Optional[List[Tool]] = None
- """An array of tools the model may call while generating a response.
-
- You can specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
- """
-
- truncation: Optional[Literal["auto", "disabled"]] = None
- """The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
- """
diff --git a/src/digitalocean_genai_sdk/types/response_retrieve_params.py b/src/digitalocean_genai_sdk/types/response_retrieve_params.py
deleted file mode 100644
index b85dbba1..00000000
--- a/src/digitalocean_genai_sdk/types/response_retrieve_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import TypedDict
-
-from .includable import Includable
-
-__all__ = ["ResponseRetrieveParams"]
-
-
-class ResponseRetrieveParams(TypedDict, total=False):
- include: List[Includable]
- """Specify additional output data to include in the response.
-
- Currently supported values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
- """
diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy.py
deleted file mode 100644
index a4c0ce82..00000000
--- a/src/digitalocean_genai_sdk/types/static_chunking_strategy.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["StaticChunkingStrategy"]
-
-
-class StaticChunkingStrategy(BaseModel):
- chunk_overlap_tokens: int
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: int
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py
deleted file mode 100644
index c3535404..00000000
--- a/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["StaticChunkingStrategyParam"]
-
-
-class StaticChunkingStrategyParam(TypedDict, total=False):
- chunk_overlap_tokens: Required[int]
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: Required[int]
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py
deleted file mode 100644
index 51de3b75..00000000
--- a/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .static_chunking_strategy_param import StaticChunkingStrategyParam
-
-__all__ = ["StaticChunkingStrategyRequestParam"]
-
-
-class StaticChunkingStrategyRequestParam(TypedDict, total=False):
- static: Required[StaticChunkingStrategyParam]
-
- type: Required[Literal["static"]]
- """Always `static`."""
diff --git a/src/digitalocean_genai_sdk/types/thread_create_params.py b/src/digitalocean_genai_sdk/types/thread_create_params.py
deleted file mode 100644
index 7ee77039..00000000
--- a/src/digitalocean_genai_sdk/types/thread_create_params.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .threads.create_message_request_param import CreateMessageRequestParam
-
-__all__ = [
- "ThreadCreateParams",
- "ToolResources",
- "ToolResourcesCodeInterpreter",
- "ToolResourcesFileSearch",
- "ToolResourcesFileSearchVectorStore",
- "ToolResourcesFileSearchVectorStoreChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic",
-]
-
-
-class ThreadCreateParams(TypedDict, total=False):
- messages: Iterable[CreateMessageRequestParam]
- """A list of [messages](/docs/api-reference/messages) to start the thread with."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- tool_resources: Optional[ToolResources]
- """
- A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False):
- type: Required[Literal["auto"]]
- """Always `auto`."""
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False):
- chunk_overlap_tokens: Required[int]
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: Required[int]
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False):
- static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic]
-
- type: Required[Literal["static"]]
- """Always `static`."""
-
-
-ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
- ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy,
- ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy,
-]
-
-
-class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
- chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
-
- file_ids: List[str]
- """A list of [file](/docs/api-reference/files) IDs to add to the vector store.
-
- There can be a maximum of 10000 files in a vector store.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- thread. There can be a maximum of 1 vector store attached to the thread.
- """
-
- vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
- """
- A helper to create a [vector store](/docs/api-reference/vector-stores/object)
- with file_ids and attach it to this thread. There can be a maximum of 1 vector
- store attached to the thread.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
diff --git a/src/digitalocean_genai_sdk/types/thread_delete_response.py b/src/digitalocean_genai_sdk/types/thread_delete_response.py
deleted file mode 100644
index 74f09d84..00000000
--- a/src/digitalocean_genai_sdk/types/thread_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ThreadDeleteResponse"]
-
-
-class ThreadDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["thread.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/thread_object.py b/src/digitalocean_genai_sdk/types/thread_object.py
deleted file mode 100644
index 7924dd8f..00000000
--- a/src/digitalocean_genai_sdk/types/thread_object.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ThreadObject", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
-
-
-class ToolResourcesCodeInterpreter(BaseModel):
- file_ids: Optional[List[str]] = None
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearch(BaseModel):
- vector_store_ids: Optional[List[str]] = None
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- thread. There can be a maximum of 1 vector store attached to the thread.
- """
-
-
-class ToolResources(BaseModel):
- code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
-
- file_search: Optional[ToolResourcesFileSearch] = None
-
-
-class ThreadObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the thread was created."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- object: Literal["thread"]
- """The object type, which is always `thread`."""
-
- tool_resources: Optional[ToolResources] = None
- """
- A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
diff --git a/src/digitalocean_genai_sdk/types/thread_update_params.py b/src/digitalocean_genai_sdk/types/thread_update_params.py
deleted file mode 100644
index d952d35b..00000000
--- a/src/digitalocean_genai_sdk/types/thread_update_params.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Optional
-from typing_extensions import TypedDict
-
-__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
-
-
-class ThreadUpdateParams(TypedDict, total=False):
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- tool_resources: Optional[ToolResources]
- """
- A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- thread. There can be a maximum of 1 vector store attached to the thread.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
diff --git a/src/digitalocean_genai_sdk/types/threads/__init__.py b/src/digitalocean_genai_sdk/types/threads/__init__.py
index 9af8d93a..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/threads/__init__.py
+++ b/src/digitalocean_genai_sdk/types/threads/__init__.py
@@ -1,36 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
-
-from .run_object import RunObject as RunObject
-from .message_object import MessageObject as MessageObject
-from .run_list_params import RunListParams as RunListParams
-from .run_create_params import RunCreateParams as RunCreateParams
-from .run_list_response import RunListResponse as RunListResponse
-from .run_update_params import RunUpdateParams as RunUpdateParams
-from .truncation_object import TruncationObject as TruncationObject
-from .message_list_params import MessageListParams as MessageListParams
-from .message_create_params import MessageCreateParams as MessageCreateParams
-from .message_list_response import MessageListResponse as MessageListResponse
-from .message_update_params import MessageUpdateParams as MessageUpdateParams
-from .run_create_run_params import RunCreateRunParams as RunCreateRunParams
-from .message_delete_response import MessageDeleteResponse as MessageDeleteResponse
-from .truncation_object_param import TruncationObjectParam as TruncationObjectParam
-from .create_message_request_param import CreateMessageRequestParam as CreateMessageRequestParam
-from .run_submit_tool_outputs_params import RunSubmitToolOutputsParams as RunSubmitToolOutputsParams
-from .message_content_image_url_object import MessageContentImageURLObject as MessageContentImageURLObject
-from .assistants_api_tool_choice_option import AssistantsAPIToolChoiceOption as AssistantsAPIToolChoiceOption
-from .message_content_image_file_object import MessageContentImageFileObject as MessageContentImageFileObject
-from .assistant_tools_file_search_type_only import AssistantToolsFileSearchTypeOnly as AssistantToolsFileSearchTypeOnly
-from .message_content_image_url_object_param import (
- MessageContentImageURLObjectParam as MessageContentImageURLObjectParam,
-)
-from .assistants_api_tool_choice_option_param import (
- AssistantsAPIToolChoiceOptionParam as AssistantsAPIToolChoiceOptionParam,
-)
-from .message_content_image_file_object_param import (
- MessageContentImageFileObjectParam as MessageContentImageFileObjectParam,
-)
-from .assistant_tools_file_search_type_only_param import (
- AssistantToolsFileSearchTypeOnlyParam as AssistantToolsFileSearchTypeOnlyParam,
-)
diff --git a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py
deleted file mode 100644
index 6708bff3..00000000
--- a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["AssistantToolsFileSearchTypeOnly"]
-
-
-class AssistantToolsFileSearchTypeOnly(BaseModel):
- type: Literal["file_search"]
- """The type of tool being defined: `file_search`"""
diff --git a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py
deleted file mode 100644
index f0a48b2c..00000000
--- a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["AssistantToolsFileSearchTypeOnlyParam"]
-
-
-class AssistantToolsFileSearchTypeOnlyParam(TypedDict, total=False):
- type: Required[Literal["file_search"]]
- """The type of tool being defined: `file_search`"""
diff --git a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py
deleted file mode 100644
index af7be1f7..00000000
--- a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-
-__all__ = ["AssistantsAPIToolChoiceOption", "AssistantsNamedToolChoice", "AssistantsNamedToolChoiceFunction"]
-
-
-class AssistantsNamedToolChoiceFunction(BaseModel):
- name: str
- """The name of the function to call."""
-
-
-class AssistantsNamedToolChoice(BaseModel):
- type: Literal["function", "code_interpreter", "file_search"]
- """The type of the tool. If type is `function`, the function name must be set"""
-
- function: Optional[AssistantsNamedToolChoiceFunction] = None
-
-
-AssistantsAPIToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantsNamedToolChoice]
diff --git a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py
deleted file mode 100644
index 10f98f89..00000000
--- a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = ["AssistantsAPIToolChoiceOptionParam", "AssistantsNamedToolChoice", "AssistantsNamedToolChoiceFunction"]
-
-
-class AssistantsNamedToolChoiceFunction(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
-
-class AssistantsNamedToolChoice(TypedDict, total=False):
- type: Required[Literal["function", "code_interpreter", "file_search"]]
- """The type of the tool. If type is `function`, the function name must be set"""
-
- function: AssistantsNamedToolChoiceFunction
-
-
-AssistantsAPIToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantsNamedToolChoice]
diff --git a/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py b/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py
deleted file mode 100644
index 64c2a781..00000000
--- a/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..assistant_tools_code_param import AssistantToolsCodeParam
-from .message_content_image_url_object_param import MessageContentImageURLObjectParam
-from .message_content_image_file_object_param import MessageContentImageFileObjectParam
-from .assistant_tools_file_search_type_only_param import AssistantToolsFileSearchTypeOnlyParam
-
-__all__ = [
- "CreateMessageRequestParam",
- "ContentArrayOfContentPart",
- "ContentArrayOfContentPartMessageRequestContentTextObject",
- "Attachment",
- "AttachmentTool",
-]
-
-
-class ContentArrayOfContentPartMessageRequestContentTextObject(TypedDict, total=False):
- text: Required[str]
- """Text content to be sent to the model"""
-
- type: Required[Literal["text"]]
- """Always `text`."""
-
-
-ContentArrayOfContentPart: TypeAlias = Union[
- MessageContentImageFileObjectParam,
- MessageContentImageURLObjectParam,
- ContentArrayOfContentPartMessageRequestContentTextObject,
-]
-
-AttachmentTool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchTypeOnlyParam]
-
-
-class Attachment(TypedDict, total=False):
- file_id: str
- """The ID of the file to attach to the message."""
-
- tools: Iterable[AttachmentTool]
- """The tools to add this file to."""
-
-
-class CreateMessageRequestParam(TypedDict, total=False):
- content: Required[Union[str, Iterable[ContentArrayOfContentPart]]]
- """The text contents of the message."""
-
- role: Required[Literal["user", "assistant"]]
- """The role of the entity that is creating the message. Allowed values include:
-
- - `user`: Indicates the message is sent by an actual user and should be used in
- most cases to represent user-generated messages.
- - `assistant`: Indicates the message is generated by the assistant. Use this
- value to insert messages from the assistant into the conversation.
- """
-
- attachments: Optional[Iterable[Attachment]]
- """A list of files attached to the message, and the tools they should be added to."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py
deleted file mode 100644
index b22ef410..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["MessageContentImageFileObject", "ImageFile"]
-
-
-class ImageFile(BaseModel):
- file_id: str
- """The [File](/docs/api-reference/files) ID of the image in the message content.
-
- Set `purpose="vision"` when uploading the File if you need to later display the
- file content.
- """
-
- detail: Optional[Literal["auto", "low", "high"]] = None
- """Specifies the detail level of the image if specified by the user.
-
- `low` uses fewer tokens, you can opt in to high resolution using `high`.
- """
-
-
-class MessageContentImageFileObject(BaseModel):
- image_file: ImageFile
-
- type: Literal["image_file"]
- """Always `image_file`."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py
deleted file mode 100644
index 734dcf15..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["MessageContentImageFileObjectParam", "ImageFile"]
-
-
-class ImageFile(TypedDict, total=False):
- file_id: Required[str]
- """The [File](/docs/api-reference/files) ID of the image in the message content.
-
- Set `purpose="vision"` when uploading the File if you need to later display the
- file content.
- """
-
- detail: Literal["auto", "low", "high"]
- """Specifies the detail level of the image if specified by the user.
-
- `low` uses fewer tokens, you can opt in to high resolution using `high`.
- """
-
-
-class MessageContentImageFileObjectParam(TypedDict, total=False):
- image_file: Required[ImageFile]
-
- type: Required[Literal["image_file"]]
- """Always `image_file`."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py
deleted file mode 100644
index 9a7f980b..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["MessageContentImageURLObject", "ImageURL"]
-
-
-class ImageURL(BaseModel):
- url: str
- """
- The external URL of the image, must be a supported image types: jpeg, jpg, png,
- gif, webp.
- """
-
- detail: Optional[Literal["auto", "low", "high"]] = None
- """Specifies the detail level of the image.
-
- `low` uses fewer tokens, you can opt in to high resolution using `high`. Default
- value is `auto`
- """
-
-
-class MessageContentImageURLObject(BaseModel):
- image_url: ImageURL
-
- type: Literal["image_url"]
- """The type of the content part."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py
deleted file mode 100644
index f3f777c4..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["MessageContentImageURLObjectParam", "ImageURL"]
-
-
-class ImageURL(TypedDict, total=False):
- url: Required[str]
- """
- The external URL of the image, must be a supported image types: jpeg, jpg, png,
- gif, webp.
- """
-
- detail: Literal["auto", "low", "high"]
- """Specifies the detail level of the image.
-
- `low` uses fewer tokens, you can opt in to high resolution using `high`. Default
- value is `auto`
- """
-
-
-class MessageContentImageURLObjectParam(TypedDict, total=False):
- image_url: Required[ImageURL]
-
- type: Required[Literal["image_url"]]
- """The type of the content part."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_create_params.py b/src/digitalocean_genai_sdk/types/threads/message_create_params.py
deleted file mode 100644
index d9a4cd40..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_create_params.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..assistant_tools_code_param import AssistantToolsCodeParam
-from .message_content_image_url_object_param import MessageContentImageURLObjectParam
-from .message_content_image_file_object_param import MessageContentImageFileObjectParam
-from .assistant_tools_file_search_type_only_param import AssistantToolsFileSearchTypeOnlyParam
-
-__all__ = [
- "MessageCreateParams",
- "ContentArrayOfContentPart",
- "ContentArrayOfContentPartMessageRequestContentTextObject",
- "Attachment",
- "AttachmentTool",
-]
-
-
-class MessageCreateParams(TypedDict, total=False):
- content: Required[Union[str, Iterable[ContentArrayOfContentPart]]]
- """The text contents of the message."""
-
- role: Required[Literal["user", "assistant"]]
- """The role of the entity that is creating the message. Allowed values include:
-
- - `user`: Indicates the message is sent by an actual user and should be used in
- most cases to represent user-generated messages.
- - `assistant`: Indicates the message is generated by the assistant. Use this
- value to insert messages from the assistant into the conversation.
- """
-
- attachments: Optional[Iterable[Attachment]]
- """A list of files attached to the message, and the tools they should be added to."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
-
-class ContentArrayOfContentPartMessageRequestContentTextObject(TypedDict, total=False):
- text: Required[str]
- """Text content to be sent to the model"""
-
- type: Required[Literal["text"]]
- """Always `text`."""
-
-
-ContentArrayOfContentPart: TypeAlias = Union[
- MessageContentImageFileObjectParam,
- MessageContentImageURLObjectParam,
- ContentArrayOfContentPartMessageRequestContentTextObject,
-]
-
-AttachmentTool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchTypeOnlyParam]
-
-
-class Attachment(TypedDict, total=False):
- file_id: str
- """The ID of the file to attach to the message."""
-
- tools: Iterable[AttachmentTool]
- """The tools to add this file to."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_delete_response.py b/src/digitalocean_genai_sdk/types/threads/message_delete_response.py
deleted file mode 100644
index c86408dc..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["MessageDeleteResponse"]
-
-
-class MessageDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["thread.message.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/threads/message_list_params.py b/src/digitalocean_genai_sdk/types/threads/message_list_params.py
deleted file mode 100644
index a7c22a66..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_list_params.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["MessageListParams"]
-
-
-class MessageListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
-
- run_id: str
- """Filter messages by the run ID that generated them."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_list_response.py b/src/digitalocean_genai_sdk/types/threads/message_list_response.py
deleted file mode 100644
index f710da32..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-from .message_object import MessageObject
-
-__all__ = ["MessageListResponse"]
-
-
-class MessageListResponse(BaseModel):
- data: List[MessageObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/threads/message_object.py b/src/digitalocean_genai_sdk/types/threads/message_object.py
deleted file mode 100644
index b2cb3711..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_object.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-from ..assistant_tools_code import AssistantToolsCode
-from .message_content_image_url_object import MessageContentImageURLObject
-from .message_content_image_file_object import MessageContentImageFileObject
-from .assistant_tools_file_search_type_only import AssistantToolsFileSearchTypeOnly
-
-__all__ = [
- "MessageObject",
- "Attachment",
- "AttachmentTool",
- "Content",
- "ContentMessageContentTextObject",
- "ContentMessageContentTextObjectText",
- "ContentMessageContentTextObjectTextAnnotation",
- "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject",
- "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation",
- "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject",
- "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath",
- "ContentMessageContentRefusalObject",
- "IncompleteDetails",
-]
-
-AttachmentTool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearchTypeOnly]
-
-
-class Attachment(BaseModel):
- file_id: Optional[str] = None
- """The ID of the file to attach to the message."""
-
- tools: Optional[List[AttachmentTool]] = None
- """The tools to add this file to."""
-
-
-class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation(
- BaseModel
-):
- file_id: str
- """The ID of the specific File the citation is from."""
-
-
-class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject(BaseModel):
- end_index: int
-
- file_citation: (
- ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation
- )
-
- start_index: int
-
- text: str
- """The text in the message content that needs to be replaced."""
-
- type: Literal["file_citation"]
- """Always `file_citation`."""
-
-
-class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath(BaseModel):
- file_id: str
- """The ID of the file that was generated."""
-
-
-class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject(BaseModel):
- end_index: int
-
- file_path: ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath
-
- start_index: int
-
- text: str
- """The text in the message content that needs to be replaced."""
-
- type: Literal["file_path"]
- """Always `file_path`."""
-
-
-ContentMessageContentTextObjectTextAnnotation: TypeAlias = Union[
- ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject,
- ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject,
-]
-
-
-class ContentMessageContentTextObjectText(BaseModel):
- annotations: List[ContentMessageContentTextObjectTextAnnotation]
-
- value: str
- """The data that makes up the text."""
-
-
-class ContentMessageContentTextObject(BaseModel):
- text: ContentMessageContentTextObjectText
-
- type: Literal["text"]
- """Always `text`."""
-
-
-class ContentMessageContentRefusalObject(BaseModel):
- refusal: str
-
- type: Literal["refusal"]
- """Always `refusal`."""
-
-
-Content: TypeAlias = Union[
- MessageContentImageFileObject,
- MessageContentImageURLObject,
- ContentMessageContentTextObject,
- ContentMessageContentRefusalObject,
-]
-
-
-class IncompleteDetails(BaseModel):
- reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"]
- """The reason the message is incomplete."""
-
-
-class MessageObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- assistant_id: Optional[str] = None
- """
- If applicable, the ID of the [assistant](/docs/api-reference/assistants) that
- authored this message.
- """
-
- attachments: Optional[List[Attachment]] = None
- """A list of files attached to the message, and the tools they were added to."""
-
- completed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the message was completed."""
-
- content: List[Content]
- """The content of the message in array of text and/or images."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the message was created."""
-
- incomplete_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the message was marked as incomplete."""
-
- incomplete_details: Optional[IncompleteDetails] = None
- """On an incomplete message, details about why the message is incomplete."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- object: Literal["thread.message"]
- """The object type, which is always `thread.message`."""
-
- role: Literal["user", "assistant"]
- """The entity that produced the message. One of `user` or `assistant`."""
-
- run_id: Optional[str] = None
- """
- The ID of the [run](/docs/api-reference/runs) associated with the creation of
- this message. Value is `null` when messages are created manually using the
- create message or create thread endpoints.
- """
-
- status: Literal["in_progress", "incomplete", "completed"]
- """
- The status of the message, which can be either `in_progress`, `incomplete`, or
- `completed`.
- """
-
- thread_id: str
- """The [thread](/docs/api-reference/threads) ID that this message belongs to."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_update_params.py b/src/digitalocean_genai_sdk/types/threads/message_update_params.py
deleted file mode 100644
index a2e25260..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_update_params.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["MessageUpdateParams"]
-
-
-class MessageUpdateParams(TypedDict, total=False):
- thread_id: Required[str]
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/run_create_params.py b/src/digitalocean_genai_sdk/types/threads/run_create_params.py
deleted file mode 100644
index 43d0611a..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_create_params.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .truncation_object_param import TruncationObjectParam
-from ..assistant_tools_code_param import AssistantToolsCodeParam
-from ..create_thread_request_param import CreateThreadRequestParam
-from ..assistant_tools_function_param import AssistantToolsFunctionParam
-from ..assistant_tools_file_search_param import AssistantToolsFileSearchParam
-from .assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam
-from ..assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = ["RunCreateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "Tool"]
-
-
-class RunCreateParams(TypedDict, total=False):
- assistant_id: Required[str]
- """
- The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
- """
-
- instructions: Optional[str]
- """Override the default system message of the assistant.
-
- This is useful for modifying the behavior on a per-run basis.
- """
-
- max_completion_tokens: Optional[int]
- """
- The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
- """
-
- max_prompt_tokens: Optional[int]
- """The maximum number of prompt tokens that may be used over the course of the run.
-
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: Union[
- str,
- Literal[
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4.5-preview",
- "gpt-4.5-preview-2025-02-27",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- ],
- None,
- ]
- """The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run.
-
- If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
- """
-
- parallel_tool_calls: bool
- """
- Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOptionParam]
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- stream: Optional[bool]
- """
- If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- thread: CreateThreadRequestParam
- """Options to create a new thread.
-
- If no thread is provided when running a request, an empty thread will be
- created.
- """
-
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam]
- """
- Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
- """
-
- tool_resources: Optional[ToolResources]
- """A set of resources that are used by the assistant's tools.
-
- The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
- tools: Optional[Iterable[Tool]]
- """Override the tools the assistant can use for this run.
-
- This is useful for modifying the behavior on a per-run basis.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
-
- truncation_strategy: Optional[TruncationObjectParam]
- """Controls for how a thread will be truncated prior to the run.
-
- Use this to control the intial context window of the run.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The ID of the [vector store](/docs/api-reference/vector-stores/object) attached
- to this assistant. There can be a maximum of 1 vector store attached to the
- assistant.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
-
-
-Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py b/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py
deleted file mode 100644
index 694c7eea..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..reasoning_effort import ReasoningEffort
-from .truncation_object_param import TruncationObjectParam
-from ..assistant_supported_models import AssistantSupportedModels
-from ..assistant_tools_code_param import AssistantToolsCodeParam
-from .create_message_request_param import CreateMessageRequestParam
-from ..assistant_tools_function_param import AssistantToolsFunctionParam
-from ..assistant_tools_file_search_param import AssistantToolsFileSearchParam
-from .assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam
-from ..assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = ["RunCreateRunParams", "Tool"]
-
-
-class RunCreateRunParams(TypedDict, total=False):
- assistant_id: Required[str]
- """
- The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
- """
-
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]]
- """A list of additional fields to include in the response.
-
- Currently the only supported value is
- `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
- additional_instructions: Optional[str]
- """Appends additional instructions at the end of the instructions for the run.
-
- This is useful for modifying the behavior on a per-run basis without overriding
- other instructions.
- """
-
- additional_messages: Optional[Iterable[CreateMessageRequestParam]]
- """Adds additional messages to the thread before creating the run."""
-
- instructions: Optional[str]
- """
- Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of
- the assistant. This is useful for modifying the behavior on a per-run basis.
- """
-
- max_completion_tokens: Optional[int]
- """
- The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
- """
-
- max_prompt_tokens: Optional[int]
- """The maximum number of prompt tokens that may be used over the course of the run.
-
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: Union[str, AssistantSupportedModels, None]
- """The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run.
-
- If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
- """
-
- parallel_tool_calls: bool
- """
- Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
- """
-
- reasoning_effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOptionParam]
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- stream: Optional[bool]
- """
- If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam]
- """
- Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
- """
-
- tools: Optional[Iterable[Tool]]
- """Override the tools the assistant can use for this run.
-
- This is useful for modifying the behavior on a per-run basis.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
-
- truncation_strategy: Optional[TruncationObjectParam]
- """Controls for how a thread will be truncated prior to the run.
-
- Use this to control the intial context window of the run.
- """
-
-
-Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/threads/run_list_params.py b/src/digitalocean_genai_sdk/types/threads/run_list_params.py
deleted file mode 100644
index fbea54f6..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_list_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["RunListParams"]
-
-
-class RunListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/run_list_response.py b/src/digitalocean_genai_sdk/types/threads/run_list_response.py
deleted file mode 100644
index 899bd0f9..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-from .run_object import RunObject
-
-__all__ = ["RunListResponse"]
-
-
-class RunListResponse(BaseModel):
- data: List[RunObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/threads/run_object.py b/src/digitalocean_genai_sdk/types/threads/run_object.py
deleted file mode 100644
index fa89f4b4..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_object.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-from .truncation_object import TruncationObject
-from ..assistant_tools_code import AssistantToolsCode
-from ..assistant_tools_function import AssistantToolsFunction
-from ..assistant_tools_file_search import AssistantToolsFileSearch
-from .assistants_api_tool_choice_option import AssistantsAPIToolChoiceOption
-from ..assistants_api_response_format_option import AssistantsAPIResponseFormatOption
-
-__all__ = [
- "RunObject",
- "IncompleteDetails",
- "LastError",
- "RequiredAction",
- "RequiredActionSubmitToolOutputs",
- "RequiredActionSubmitToolOutputsToolCall",
- "RequiredActionSubmitToolOutputsToolCallFunction",
- "Tool",
- "Usage",
-]
-
-
-class IncompleteDetails(BaseModel):
- reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]] = None
- """The reason why the run is incomplete.
-
- This will point to which specific token limit was reached over the course of the
- run.
- """
-
-
-class LastError(BaseModel):
- code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
- """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`."""
-
- message: str
- """A human-readable description of the error."""
-
-
-class RequiredActionSubmitToolOutputsToolCallFunction(BaseModel):
- arguments: str
- """The arguments that the model expects you to pass to the function."""
-
- name: str
- """The name of the function."""
-
-
-class RequiredActionSubmitToolOutputsToolCall(BaseModel):
- id: str
- """The ID of the tool call.
-
- This ID must be referenced when you submit the tool outputs in using the
- [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs)
- endpoint.
- """
-
- function: RequiredActionSubmitToolOutputsToolCallFunction
- """The function definition."""
-
- type: Literal["function"]
- """The type of tool call the output is required for.
-
- For now, this is always `function`.
- """
-
-
-class RequiredActionSubmitToolOutputs(BaseModel):
- tool_calls: List[RequiredActionSubmitToolOutputsToolCall]
- """A list of the relevant tool calls."""
-
-
-class RequiredAction(BaseModel):
- submit_tool_outputs: RequiredActionSubmitToolOutputs
- """Details on the tool outputs needed for this run to continue."""
-
- type: Literal["submit_tool_outputs"]
- """For now, this is always `submit_tool_outputs`."""
-
-
-Tool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearch, AssistantToolsFunction]
-
-
-class Usage(BaseModel):
- completion_tokens: int
- """Number of completion tokens used over the course of the run."""
-
- prompt_tokens: int
- """Number of prompt tokens used over the course of the run."""
-
- total_tokens: int
- """Total number of tokens used (prompt + completion)."""
-
-
-class RunObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- assistant_id: str
- """
- The ID of the [assistant](/docs/api-reference/assistants) used for execution of
- this run.
- """
-
- cancelled_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run was cancelled."""
-
- completed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run was completed."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the run was created."""
-
- expires_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run will expire."""
-
- failed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run failed."""
-
- incomplete_details: Optional[IncompleteDetails] = None
- """Details on why the run is incomplete.
-
- Will be `null` if the run is not incomplete.
- """
-
- instructions: str
- """
- The instructions that the [assistant](/docs/api-reference/assistants) used for
- this run.
- """
-
- last_error: Optional[LastError] = None
- """The last error associated with this run. Will be `null` if there are no errors."""
-
- max_completion_tokens: Optional[int] = None
- """
- The maximum number of completion tokens specified to have been used over the
- course of the run.
- """
-
- max_prompt_tokens: Optional[int] = None
- """
- The maximum number of prompt tokens specified to have been used over the course
- of the run.
- """
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: str
- """
- The model that the [assistant](/docs/api-reference/assistants) used for this
- run.
- """
-
- object: Literal["thread.run"]
- """The object type, which is always `thread.run`."""
-
- parallel_tool_calls: bool
- """
- Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
- """
-
- required_action: Optional[RequiredAction] = None
- """Details on the action required to continue the run.
-
- Will be `null` if no action is required.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOption] = None
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- started_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run was started."""
-
- status: Literal[
- "queued",
- "in_progress",
- "requires_action",
- "cancelling",
- "cancelled",
- "failed",
- "completed",
- "incomplete",
- "expired",
- ]
- """
- The status of the run, which can be either `queued`, `in_progress`,
- `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
- `incomplete`, or `expired`.
- """
-
- thread_id: str
- """
- The ID of the [thread](/docs/api-reference/threads) that was executed on as a
- part of this run.
- """
-
- tool_choice: Optional[AssistantsAPIToolChoiceOption] = None
- """
- Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
- """
-
- tools: List[Tool]
- """
- The list of tools that the [assistant](/docs/api-reference/assistants) used for
- this run.
- """
-
- truncation_strategy: Optional[TruncationObject] = None
- """Controls for how a thread will be truncated prior to the run.
-
- Use this to control the intial context window of the run.
- """
-
- usage: Optional[Usage] = None
- """Usage statistics related to the run.
-
- This value will be `null` if the run is not in a terminal state (i.e.
- `in_progress`, `queued`, etc.).
- """
-
- temperature: Optional[float] = None
- """The sampling temperature used for this run. If not set, defaults to 1."""
-
- top_p: Optional[float] = None
- """The nucleus sampling value used for this run. If not set, defaults to 1."""
diff --git a/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py b/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py
deleted file mode 100644
index 77ab84ba..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["RunSubmitToolOutputsParams", "ToolOutput"]
-
-
-class RunSubmitToolOutputsParams(TypedDict, total=False):
- thread_id: Required[str]
-
- tool_outputs: Required[Iterable[ToolOutput]]
- """A list of tools for which the outputs are being submitted."""
-
- stream: Optional[bool]
- """
- If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
- """
-
-
-class ToolOutput(TypedDict, total=False):
- output: str
- """The output of the tool call to be submitted to continue the run."""
-
- tool_call_id: str
- """
- The ID of the tool call in the `required_action` object within the run object
- the output is being submitted for.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/run_update_params.py b/src/digitalocean_genai_sdk/types/threads/run_update_params.py
deleted file mode 100644
index 7b84a9b5..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_update_params.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["RunUpdateParams"]
-
-
-class RunUpdateParams(TypedDict, total=False):
- thread_id: Required[str]
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py b/src/digitalocean_genai_sdk/types/threads/runs/__init__.py
index 3cab1f9c..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py
+++ b/src/digitalocean_genai_sdk/types/threads/runs/__init__.py
@@ -1,8 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
-
-from .run_step_object import RunStepObject as RunStepObject
-from .step_list_params import StepListParams as StepListParams
-from .step_list_response import StepListResponse as StepListResponse
-from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py b/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py
deleted file mode 100644
index 3ede68fa..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ...._models import BaseModel
-from ...file_search_ranker import FileSearchRanker
-
-__all__ = [
- "RunStepObject",
- "LastError",
- "StepDetails",
- "StepDetailsRunStepDetailsMessageCreationObject",
- "StepDetailsRunStepDetailsMessageCreationObjectMessageCreation",
- "StepDetailsRunStepDetailsToolCallsObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCall",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction",
- "Usage",
-]
-
-
-class LastError(BaseModel):
- code: Literal["server_error", "rate_limit_exceeded"]
- """One of `server_error` or `rate_limit_exceeded`."""
-
- message: str
- """A human-readable description of the error."""
-
-
-class StepDetailsRunStepDetailsMessageCreationObjectMessageCreation(BaseModel):
- message_id: str
- """The ID of the message that was created by this run step."""
-
-
-class StepDetailsRunStepDetailsMessageCreationObject(BaseModel):
- message_creation: StepDetailsRunStepDetailsMessageCreationObjectMessageCreation
-
- type: Literal["message_creation"]
- """Always `message_creation`."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject(
- BaseModel
-):
- logs: str
- """The text output from the Code Interpreter tool call."""
-
- type: Literal["logs"]
- """Always `logs`."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage(
- BaseModel
-):
- file_id: str
- """The [file](/docs/api-reference/files) ID of the image."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject(
- BaseModel
-):
- image: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage
-
- type: Literal["image"]
- """Always `image`."""
-
-
-StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput: TypeAlias = Union[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject,
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject,
-]
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter(BaseModel):
- input: str
- """The input to the Code Interpreter tool call."""
-
- outputs: List[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput
- ]
- """The outputs from the Code Interpreter tool call.
-
- Code Interpreter can output one or more items, including text (`logs`) or images
- (`image`). Each of these are represented by a different object type.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject(BaseModel):
- id: str
- """The ID of the tool call."""
-
- code_interpreter: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter
- """The Code Interpreter tool call definition."""
-
- type: Literal["code_interpreter"]
- """The type of tool call.
-
- This is always going to be `code_interpreter` for this type of tool call.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions(
- BaseModel
-):
- ranker: FileSearchRanker
- """The ranker to use for the file search.
-
- If not specified will use the `auto` ranker.
- """
-
- score_threshold: float
- """The score threshold for the file search.
-
- All values must be a floating point number between 0 and 1.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent(
- BaseModel
-):
- text: Optional[str] = None
- """The text content of the file."""
-
- type: Optional[Literal["text"]] = None
- """The type of the content."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult(
- BaseModel
-):
- file_id: str
- """The ID of the file that result was found in."""
-
- file_name: str
- """The name of the file that result was found in."""
-
- score: float
- """The score of the result.
-
- All values must be a floating point number between 0 and 1.
- """
-
- content: Optional[
- List[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent
- ]
- ] = None
- """The content of the result that was found.
-
- The content is only included if requested via the include query parameter.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch(BaseModel):
- ranking_options: Optional[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions
- ] = None
- """The ranking options for the file search."""
-
- results: Optional[
- List[StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult]
- ] = None
- """The results of the file search."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject(BaseModel):
- id: str
- """The ID of the tool call object."""
-
- file_search: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch
- """For now, this is always going to be an empty object."""
-
- type: Literal["file_search"]
- """The type of tool call.
-
- This is always going to be `file_search` for this type of tool call.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction(BaseModel):
- arguments: str
- """The arguments passed to the function."""
-
- name: str
- """The name of the function."""
-
- output: Optional[str] = None
- """The output of the function.
-
- This will be `null` if the outputs have not been
- [submitted](/docs/api-reference/runs/submitToolOutputs) yet.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject(BaseModel):
- id: str
- """The ID of the tool call object."""
-
- function: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction
- """The definition of the function that was called."""
-
- type: Literal["function"]
- """The type of tool call.
-
- This is always going to be `function` for this type of tool call.
- """
-
-
-StepDetailsRunStepDetailsToolCallsObjectToolCall: TypeAlias = Union[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject,
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject,
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject,
-]
-
-
-class StepDetailsRunStepDetailsToolCallsObject(BaseModel):
- tool_calls: List[StepDetailsRunStepDetailsToolCallsObjectToolCall]
- """An array of tool calls the run step was involved in.
-
- These can be associated with one of three types of tools: `code_interpreter`,
- `file_search`, or `function`.
- """
-
- type: Literal["tool_calls"]
- """Always `tool_calls`."""
-
-
-StepDetails: TypeAlias = Union[StepDetailsRunStepDetailsMessageCreationObject, StepDetailsRunStepDetailsToolCallsObject]
-
-
-class Usage(BaseModel):
- completion_tokens: int
- """Number of completion tokens used over the course of the run step."""
-
- prompt_tokens: int
- """Number of prompt tokens used over the course of the run step."""
-
- total_tokens: int
- """Total number of tokens used (prompt + completion)."""
-
-
-class RunStepObject(BaseModel):
- id: str
- """The identifier of the run step, which can be referenced in API endpoints."""
-
- assistant_id: str
- """
- The ID of the [assistant](/docs/api-reference/assistants) associated with the
- run step.
- """
-
- cancelled_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run step was cancelled."""
-
- completed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run step completed."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the run step was created."""
-
- expired_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run step expired.
-
- A step is considered expired if the parent run is expired.
- """
-
- failed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run step failed."""
-
- last_error: Optional[LastError] = None
- """The last error associated with this run step.
-
- Will be `null` if there are no errors.
- """
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- object: Literal["thread.run.step"]
- """The object type, which is always `thread.run.step`."""
-
- run_id: str
- """The ID of the [run](/docs/api-reference/runs) that this run step is a part of."""
-
- status: Literal["in_progress", "cancelled", "failed", "completed", "expired"]
- """
- The status of the run step, which can be either `in_progress`, `cancelled`,
- `failed`, `completed`, or `expired`.
- """
-
- step_details: StepDetails
- """The details of the run step."""
-
- thread_id: str
- """The ID of the [thread](/docs/api-reference/threads) that was run."""
-
- type: Literal["message_creation", "tool_calls"]
- """The type of run step, which can be either `message_creation` or `tool_calls`."""
-
- usage: Optional[Usage] = None
- """Usage statistics related to the run step.
-
- This value will be `null` while the run step's status is `in_progress`.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py b/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py
deleted file mode 100644
index 6383fcb3..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["StepListParams"]
-
-
-class StepListParams(TypedDict, total=False):
- thread_id: Required[str]
-
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]]
- """A list of additional fields to include in the response.
-
- Currently the only supported value is
- `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py b/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py
deleted file mode 100644
index 93ccb4ca..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ...._models import BaseModel
-from .run_step_object import RunStepObject
-
-__all__ = ["StepListResponse"]
-
-
-class StepListResponse(BaseModel):
- data: List[RunStepObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py b/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py
deleted file mode 100644
index ce6bcbfb..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["StepRetrieveParams"]
-
-
-class StepRetrieveParams(TypedDict, total=False):
- thread_id: Required[str]
-
- run_id: Required[str]
-
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]]
- """A list of additional fields to include in the response.
-
- Currently the only supported value is
- `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/truncation_object.py b/src/digitalocean_genai_sdk/types/threads/truncation_object.py
deleted file mode 100644
index 7c81b3b5..00000000
--- a/src/digitalocean_genai_sdk/types/threads/truncation_object.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["TruncationObject"]
-
-
-class TruncationObject(BaseModel):
- type: Literal["auto", "last_messages"]
- """The truncation strategy to use for the thread.
-
- The default is `auto`. If set to `last_messages`, the thread will be truncated
- to the n most recent messages in the thread. When set to `auto`, messages in the
- middle of the thread will be dropped to fit the context length of the model,
- `max_prompt_tokens`.
- """
-
- last_messages: Optional[int] = None
- """
- The number of most recent messages from the thread when constructing the context
- for the run.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py b/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py
deleted file mode 100644
index 98d942fa..00000000
--- a/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["TruncationObjectParam"]
-
-
-class TruncationObjectParam(TypedDict, total=False):
- type: Required[Literal["auto", "last_messages"]]
- """The truncation strategy to use for the thread.
-
- The default is `auto`. If set to `last_messages`, the thread will be truncated
- to the n most recent messages in the thread. When set to `auto`, messages in the
- middle of the thread will be dropped to fit the context length of the model,
- `max_prompt_tokens`.
- """
-
- last_messages: Optional[int]
- """
- The number of most recent messages from the thread when constructing the context
- for the run.
- """
diff --git a/src/digitalocean_genai_sdk/types/transcription_segment.py b/src/digitalocean_genai_sdk/types/transcription_segment.py
deleted file mode 100644
index 2345fa18..00000000
--- a/src/digitalocean_genai_sdk/types/transcription_segment.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-
-__all__ = ["TranscriptionSegment"]
-
-
-class TranscriptionSegment(BaseModel):
- id: int
- """Unique identifier of the segment."""
-
- avg_logprob: float
- """Average logprob of the segment.
-
- If the value is lower than -1, consider the logprobs failed.
- """
-
- compression_ratio: float
- """Compression ratio of the segment.
-
- If the value is greater than 2.4, consider the compression failed.
- """
-
- end: float
- """End time of the segment in seconds."""
-
- no_speech_prob: float
- """Probability of no speech in the segment.
-
- If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this
- segment silent.
- """
-
- seek: int
- """Seek offset of the segment."""
-
- start: float
- """Start time of the segment in seconds."""
-
- temperature: float
- """Temperature parameter used for generating the segment."""
-
- text: str
- """Text content of the segment."""
-
- tokens: List[int]
- """Array of token IDs for the text content."""
diff --git a/src/digitalocean_genai_sdk/types/upload.py b/src/digitalocean_genai_sdk/types/upload.py
deleted file mode 100644
index 06b8a806..00000000
--- a/src/digitalocean_genai_sdk/types/upload.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .openai_file import OpenAIFile
-
-__all__ = ["Upload"]
-
-
-class Upload(BaseModel):
- id: str
- """The Upload unique identifier, which can be referenced in API endpoints."""
-
- bytes: int
- """The intended number of bytes to be uploaded."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the Upload was created."""
-
- expires_at: int
- """The Unix timestamp (in seconds) for when the Upload will expire."""
-
- filename: str
- """The name of the file to be uploaded."""
-
- purpose: str
- """The intended purpose of the file.
-
- [Please refer here](/docs/api-reference/files/object#files/object-purpose) for
- acceptable values.
- """
-
- status: Literal["pending", "completed", "cancelled", "expired"]
- """The status of the Upload."""
-
- file: Optional[OpenAIFile] = None
- """The `File` object represents a document that has been uploaded to OpenAI."""
-
- object: Optional[Literal["upload"]] = None
- """The object type, which is always "upload"."""
diff --git a/src/digitalocean_genai_sdk/types/upload_add_part_params.py b/src/digitalocean_genai_sdk/types/upload_add_part_params.py
deleted file mode 100644
index a0c8b61c..00000000
--- a/src/digitalocean_genai_sdk/types/upload_add_part_params.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["UploadAddPartParams"]
-
-
-class UploadAddPartParams(TypedDict, total=False):
- data: Required[FileTypes]
- """The chunk of bytes for this Part."""
diff --git a/src/digitalocean_genai_sdk/types/upload_add_part_response.py b/src/digitalocean_genai_sdk/types/upload_add_part_response.py
deleted file mode 100644
index fb091f76..00000000
--- a/src/digitalocean_genai_sdk/types/upload_add_part_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["UploadAddPartResponse"]
-
-
-class UploadAddPartResponse(BaseModel):
- id: str
- """The upload Part unique identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the Part was created."""
-
- object: Literal["upload.part"]
- """The object type, which is always `upload.part`."""
-
- upload_id: str
- """The ID of the Upload object that this Part was added to."""
diff --git a/src/digitalocean_genai_sdk/types/upload_complete_params.py b/src/digitalocean_genai_sdk/types/upload_complete_params.py
deleted file mode 100644
index cce568d5..00000000
--- a/src/digitalocean_genai_sdk/types/upload_complete_params.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Required, TypedDict
-
-__all__ = ["UploadCompleteParams"]
-
-
-class UploadCompleteParams(TypedDict, total=False):
- part_ids: Required[List[str]]
- """The ordered list of Part IDs."""
-
- md5: str
- """
- The optional md5 checksum for the file contents to verify if the bytes uploaded
- matches what you expect.
- """
diff --git a/src/digitalocean_genai_sdk/types/upload_create_params.py b/src/digitalocean_genai_sdk/types/upload_create_params.py
deleted file mode 100644
index eab9a51b..00000000
--- a/src/digitalocean_genai_sdk/types/upload_create_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UploadCreateParams"]
-
-
-class UploadCreateParams(TypedDict, total=False):
- bytes: Required[int]
- """The number of bytes in the file you are uploading."""
-
- filename: Required[str]
- """The name of the file to upload."""
-
- mime_type: Required[str]
- """The MIME type of the file.
-
- This must fall within the supported MIME types for your file purpose. See the
- supported MIME types for assistants and vision.
- """
-
- purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]]
- """The intended purpose of the uploaded file.
-
- See the
- [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
- """
diff --git a/src/digitalocean_genai_sdk/types/usage_response.py b/src/digitalocean_genai_sdk/types/usage_response.py
deleted file mode 100644
index 9f70e7c4..00000000
--- a/src/digitalocean_genai_sdk/types/usage_response.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-
-__all__ = [
- "UsageResponse",
- "Data",
- "DataResult",
- "DataResultUsageCompletionsResult",
- "DataResultUsageEmbeddingsResult",
- "DataResultUsageModerationsResult",
- "DataResultUsageImagesResult",
- "DataResultUsageAudioSpeechesResult",
- "DataResultUsageAudioTranscriptionsResult",
- "DataResultUsageVectorStoresResult",
- "DataResultUsageCodeInterpreterSessionsResult",
- "DataResultCostsResult",
- "DataResultCostsResultAmount",
-]
-
-
-class DataResultUsageCompletionsResult(BaseModel):
- input_tokens: int
- """The aggregated number of text input tokens used, including cached tokens.
-
- For customers subscribe to scale tier, this includes scale tier tokens.
- """
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.completions.result"]
-
- output_tokens: int
- """The aggregated number of text output tokens used.
-
- For customers subscribe to scale tier, this includes scale tier tokens.
- """
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- batch: Optional[bool] = None
- """
- When `group_by=batch`, this field tells whether the grouped usage result is
- batch or not.
- """
-
- input_audio_tokens: Optional[int] = None
- """The aggregated number of audio input tokens used, including cached tokens."""
-
- input_cached_tokens: Optional[int] = None
- """
- The aggregated number of text input tokens that has been cached from previous
- requests. For customers subscribe to scale tier, this includes scale tier
- tokens.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- output_audio_tokens: Optional[int] = None
- """The aggregated number of audio output tokens used."""
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageEmbeddingsResult(BaseModel):
- input_tokens: int
- """The aggregated number of input tokens used."""
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.embeddings.result"]
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageModerationsResult(BaseModel):
- input_tokens: int
- """The aggregated number of input tokens used."""
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.moderations.result"]
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageImagesResult(BaseModel):
- images: int
- """The number of images processed."""
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.images.result"]
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- size: Optional[str] = None
- """
- When `group_by=size`, this field provides the image size of the grouped usage
- result.
- """
-
- source: Optional[str] = None
- """
- When `group_by=source`, this field provides the source of the grouped usage
- result, possible values are `image.generation`, `image.edit`, `image.variation`.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageAudioSpeechesResult(BaseModel):
- characters: int
- """The number of characters processed."""
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.audio_speeches.result"]
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageAudioTranscriptionsResult(BaseModel):
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.audio_transcriptions.result"]
-
- seconds: int
- """The number of seconds processed."""
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageVectorStoresResult(BaseModel):
- object: Literal["organization.usage.vector_stores.result"]
-
- usage_bytes: int
- """The vector stores usage in bytes."""
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
-
-class DataResultUsageCodeInterpreterSessionsResult(BaseModel):
- object: Literal["organization.usage.code_interpreter_sessions.result"]
-
- num_sessions: Optional[int] = None
- """The number of code interpreter sessions."""
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
-
-class DataResultCostsResultAmount(BaseModel):
- currency: Optional[str] = None
- """Lowercase ISO-4217 currency e.g. "usd" """
-
- value: Optional[float] = None
- """The numeric value of the cost."""
-
-
-class DataResultCostsResult(BaseModel):
- object: Literal["organization.costs.result"]
-
- amount: Optional[DataResultCostsResultAmount] = None
- """The monetary value in its associated currency."""
-
- line_item: Optional[str] = None
- """
- When `group_by=line_item`, this field provides the line item of the grouped
- costs result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- costs result.
- """
-
-
-DataResult: TypeAlias = Union[
- DataResultUsageCompletionsResult,
- DataResultUsageEmbeddingsResult,
- DataResultUsageModerationsResult,
- DataResultUsageImagesResult,
- DataResultUsageAudioSpeechesResult,
- DataResultUsageAudioTranscriptionsResult,
- DataResultUsageVectorStoresResult,
- DataResultUsageCodeInterpreterSessionsResult,
- DataResultCostsResult,
-]
-
-
-class Data(BaseModel):
- end_time: int
-
- object: Literal["bucket"]
-
- result: List[DataResult]
-
- start_time: int
-
-
-class UsageResponse(BaseModel):
- data: List[Data]
-
- has_more: bool
-
- next_page: str
-
- object: Literal["page"]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_create_params.py b/src/digitalocean_genai_sdk/types/vector_store_create_params.py
deleted file mode 100644
index 48118e80..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_create_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import TypeAlias, TypedDict
-
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
-from .auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam
-from .static_chunking_strategy_request_param import StaticChunkingStrategyRequestParam
-
-__all__ = ["VectorStoreCreateParams", "ChunkingStrategy"]
-
-
-class VectorStoreCreateParams(TypedDict, total=False):
- chunking_strategy: ChunkingStrategy
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy. Only applicable if `file_ids` is
- non-empty.
- """
-
- expires_after: VectorStoreExpirationAfterParam
- """The expiration policy for a vector store."""
-
- file_ids: List[str]
- """A list of [File](/docs/api-reference/files) IDs that the vector store should
- use.
-
- Useful for tools like `file_search` that can access files.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- name: str
- """The name of the vector store."""
-
-
-ChunkingStrategy: TypeAlias = Union[AutoChunkingStrategyRequestParam, StaticChunkingStrategyRequestParam]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_delete_response.py b/src/digitalocean_genai_sdk/types/vector_store_delete_response.py
deleted file mode 100644
index 17d3ee21..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["VectorStoreDeleteResponse"]
-
-
-class VectorStoreDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["vector_store.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py b/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py
deleted file mode 100644
index 1d417d52..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["VectorStoreExpirationAfter"]
-
-
-class VectorStoreExpirationAfter(BaseModel):
- anchor: Literal["last_active_at"]
- """Anchor timestamp after which the expiration policy applies.
-
- Supported anchors: `last_active_at`.
- """
-
- days: int
- """The number of days after the anchor time that the vector store will expire."""
diff --git a/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py b/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py
deleted file mode 100644
index 29a008c7..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["VectorStoreExpirationAfterParam"]
-
-
-class VectorStoreExpirationAfterParam(TypedDict, total=False):
- anchor: Required[Literal["last_active_at"]]
- """Anchor timestamp after which the expiration policy applies.
-
- Supported anchors: `last_active_at`.
- """
-
- days: Required[int]
- """The number of days after the anchor time that the vector store will expire."""
diff --git a/src/digitalocean_genai_sdk/types/vector_store_list_params.py b/src/digitalocean_genai_sdk/types/vector_store_list_params.py
deleted file mode 100644
index e26ff90a..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_list_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["VectorStoreListParams"]
-
-
-class VectorStoreListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_store_list_response.py b/src/digitalocean_genai_sdk/types/vector_store_list_response.py
deleted file mode 100644
index 2dc455ea..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .vector_store_object import VectorStoreObject
-
-__all__ = ["VectorStoreListResponse"]
-
-
-class VectorStoreListResponse(BaseModel):
- data: List[VectorStoreObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/vector_store_object.py b/src/digitalocean_genai_sdk/types/vector_store_object.py
deleted file mode 100644
index ebd52a31..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_object.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .vector_store_expiration_after import VectorStoreExpirationAfter
-
-__all__ = ["VectorStoreObject", "FileCounts"]
-
-
-class FileCounts(BaseModel):
- cancelled: int
- """The number of files that were cancelled."""
-
- completed: int
- """The number of files that have been successfully processed."""
-
- failed: int
- """The number of files that have failed to process."""
-
- in_progress: int
- """The number of files that are currently being processed."""
-
- total: int
- """The total number of files."""
-
-
-class VectorStoreObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the vector store was created."""
-
- file_counts: FileCounts
-
- last_active_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the vector store was last active."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- name: str
- """The name of the vector store."""
-
- object: Literal["vector_store"]
- """The object type, which is always `vector_store`."""
-
- status: Literal["expired", "in_progress", "completed"]
- """
- The status of the vector store, which can be either `expired`, `in_progress`, or
- `completed`. A status of `completed` indicates that the vector store is ready
- for use.
- """
-
- usage_bytes: int
- """The total number of bytes used by the files in the vector store."""
-
- expires_after: Optional[VectorStoreExpirationAfter] = None
- """The expiration policy for a vector store."""
-
- expires_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the vector store will expire."""
diff --git a/src/digitalocean_genai_sdk/types/vector_store_search_params.py b/src/digitalocean_genai_sdk/types/vector_store_search_params.py
deleted file mode 100644
index 5b90b063..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_search_params.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .compound_filter_param import CompoundFilterParam
-from .comparison_filter_param import ComparisonFilterParam
-
-__all__ = ["VectorStoreSearchParams", "Filters", "RankingOptions"]
-
-
-class VectorStoreSearchParams(TypedDict, total=False):
- query: Required[Union[str, List[str]]]
- """A query string for a search"""
-
- filters: Filters
- """A filter to apply based on file attributes."""
-
- max_num_results: int
- """The maximum number of results to return.
-
- This number should be between 1 and 50 inclusive.
- """
-
- ranking_options: RankingOptions
- """Ranking options for search."""
-
- rewrite_query: bool
- """Whether to rewrite the natural language query for vector search."""
-
-
-Filters: TypeAlias = Union[ComparisonFilterParam, CompoundFilterParam]
-
-
-class RankingOptions(TypedDict, total=False):
- ranker: Literal["auto", "default-2024-11-15"]
-
- score_threshold: float
diff --git a/src/digitalocean_genai_sdk/types/vector_store_search_response.py b/src/digitalocean_genai_sdk/types/vector_store_search_response.py
deleted file mode 100644
index b303f7ea..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_search_response.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["VectorStoreSearchResponse", "Data", "DataContent"]
-
-
-class DataContent(BaseModel):
- text: str
- """The text content returned from search."""
-
- type: Literal["text"]
- """The type of content."""
-
-
-class Data(BaseModel):
- attributes: Optional[Dict[str, Union[str, float, bool]]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- content: List[DataContent]
- """Content chunks from the file."""
-
- file_id: str
- """The ID of the vector store file."""
-
- filename: str
- """The name of the vector store file."""
-
- score: float
- """The similarity score for the result."""
-
-
-class VectorStoreSearchResponse(BaseModel):
- data: List[Data]
- """The list of search result items."""
-
- has_more: bool
- """Indicates if there are more results to fetch."""
-
- next_page: Optional[str] = None
- """The token for the next page, if any."""
-
- object: Literal["vector_store.search_results.page"]
- """The object type, which is always `vector_store.search_results.page`"""
-
- search_query: List[str]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_update_params.py b/src/digitalocean_genai_sdk/types/vector_store_update_params.py
deleted file mode 100644
index a9400cf2..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_update_params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import TypedDict
-
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
-
-__all__ = ["VectorStoreUpdateParams"]
-
-
-class VectorStoreUpdateParams(TypedDict, total=False):
- expires_after: Optional[VectorStoreExpirationAfterParam]
- """The expiration policy for a vector store."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- name: Optional[str]
- """The name of the vector store."""
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py b/src/digitalocean_genai_sdk/types/vector_stores/__init__.py
index 5018f06d..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py
+++ b/src/digitalocean_genai_sdk/types/vector_stores/__init__.py
@@ -1,15 +1,3 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
-
-from .file_list_params import FileListParams as FileListParams
-from .file_create_params import FileCreateParams as FileCreateParams
-from .file_update_params import FileUpdateParams as FileUpdateParams
-from .file_delete_response import FileDeleteResponse as FileDeleteResponse
-from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams
-from .vector_store_file_object import VectorStoreFileObject as VectorStoreFileObject
-from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams
-from .file_retrieve_content_response import FileRetrieveContentResponse as FileRetrieveContentResponse
-from .vector_store_file_batch_object import VectorStoreFileBatchObject as VectorStoreFileBatchObject
-from .chunking_strategy_request_param import ChunkingStrategyRequestParam as ChunkingStrategyRequestParam
-from .list_vector_store_files_response import ListVectorStoreFilesResponse as ListVectorStoreFilesResponse
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py
deleted file mode 100644
index 1dab9558..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import TypeAlias
-
-from ..auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam
-from ..static_chunking_strategy_request_param import StaticChunkingStrategyRequestParam
-
-__all__ = ["ChunkingStrategyRequestParam"]
-
-ChunkingStrategyRequestParam: TypeAlias = Union[AutoChunkingStrategyRequestParam, StaticChunkingStrategyRequestParam]
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py
deleted file mode 100644
index 2e2bf227..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Required, TypedDict
-
-from .chunking_strategy_request_param import ChunkingStrategyRequestParam
-
-__all__ = ["FileBatchCreateParams"]
-
-
-class FileBatchCreateParams(TypedDict, total=False):
- file_ids: Required[List[str]]
- """A list of [File](/docs/api-reference/files) IDs that the vector store should
- use.
-
- Useful for tools like `file_search` that can access files.
- """
-
- attributes: Optional[Dict[str, Union[str, float, bool]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- chunking_strategy: ChunkingStrategyRequestParam
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py
deleted file mode 100644
index 2a0a6c6a..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FileBatchListFilesParams"]
-
-
-class FileBatchListFilesParams(TypedDict, total=False):
- vector_store_id: Required[str]
-
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- filter: Literal["in_progress", "completed", "failed", "cancelled"]
- """Filter by file status.
-
- One of `in_progress`, `completed`, `failed`, `cancelled`.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py
deleted file mode 100644
index 6183f4e7..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Optional
-from typing_extensions import Required, TypedDict
-
-from .chunking_strategy_request_param import ChunkingStrategyRequestParam
-
-__all__ = ["FileCreateParams"]
-
-
-class FileCreateParams(TypedDict, total=False):
- file_id: Required[str]
- """A [File](/docs/api-reference/files) ID that the vector store should use.
-
- Useful for tools like `file_search` that can access files.
- """
-
- attributes: Optional[Dict[str, Union[str, float, bool]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- chunking_strategy: ChunkingStrategyRequestParam
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py b/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py
deleted file mode 100644
index 24fbe570..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["FileDeleteResponse"]
-
-
-class FileDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["vector_store.file.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py
deleted file mode 100644
index 867b5fb3..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["FileListParams"]
-
-
-class FileListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- filter: Literal["in_progress", "completed", "failed", "cancelled"]
- """Filter by file status.
-
- One of `in_progress`, `completed`, `failed`, `cancelled`.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py b/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py
deleted file mode 100644
index e4f0966c..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["FileRetrieveContentResponse", "Data"]
-
-
-class Data(BaseModel):
- text: Optional[str] = None
- """The text content"""
-
- type: Optional[str] = None
- """The content type (currently only `"text"`)"""
-
-
-class FileRetrieveContentResponse(BaseModel):
- data: List[Data]
- """Parsed content of the file."""
-
- has_more: bool
- """Indicates if there are more content pages to fetch."""
-
- next_page: Optional[str] = None
- """The token for the next page, if any."""
-
- object: Literal["vector_store.file_content.page"]
- """The object type, which is always `vector_store.file_content.page`"""
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py
deleted file mode 100644
index ebf540d0..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["FileUpdateParams"]
-
-
-class FileUpdateParams(TypedDict, total=False):
- vector_store_id: Required[str]
-
- attributes: Required[Optional[Dict[str, Union[str, float, bool]]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py b/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py
deleted file mode 100644
index dc997962..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-from .vector_store_file_object import VectorStoreFileObject
-
-__all__ = ["ListVectorStoreFilesResponse"]
-
-
-class ListVectorStoreFilesResponse(BaseModel):
- data: List[VectorStoreFileObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py
deleted file mode 100644
index 3d5aa1bd..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["VectorStoreFileBatchObject", "FileCounts"]
-
-
-class FileCounts(BaseModel):
- cancelled: int
- """The number of files that where cancelled."""
-
- completed: int
- """The number of files that have been processed."""
-
- failed: int
- """The number of files that have failed to process."""
-
- in_progress: int
- """The number of files that are currently being processed."""
-
- total: int
- """The total number of files."""
-
-
-class VectorStoreFileBatchObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """
- The Unix timestamp (in seconds) for when the vector store files batch was
- created.
- """
-
- file_counts: FileCounts
-
- object: Literal["vector_store.files_batch"]
- """The object type, which is always `vector_store.file_batch`."""
-
- status: Literal["in_progress", "completed", "cancelled", "failed"]
- """
- The status of the vector store files batch, which can be either `in_progress`,
- `completed`, `cancelled` or `failed`.
- """
-
- vector_store_id: str
- """
- The ID of the [vector store](/docs/api-reference/vector-stores/object) that the
- [File](/docs/api-reference/files) is attached to.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py
deleted file mode 100644
index e28e28a6..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-from ..static_chunking_strategy import StaticChunkingStrategy
-
-__all__ = [
- "VectorStoreFileObject",
- "LastError",
- "ChunkingStrategy",
- "ChunkingStrategyStaticChunkingStrategyResponseParam",
- "ChunkingStrategyOtherChunkingStrategyResponseParam",
-]
-
-
-class LastError(BaseModel):
- code: Literal["server_error", "unsupported_file", "invalid_file"]
- """One of `server_error` or `rate_limit_exceeded`."""
-
- message: str
- """A human-readable description of the error."""
-
-
-class ChunkingStrategyStaticChunkingStrategyResponseParam(BaseModel):
- static: StaticChunkingStrategy
-
- type: Literal["static"]
- """Always `static`."""
-
-
-class ChunkingStrategyOtherChunkingStrategyResponseParam(BaseModel):
- type: Literal["other"]
- """Always `other`."""
-
-
-ChunkingStrategy: TypeAlias = Union[
- ChunkingStrategyStaticChunkingStrategyResponseParam, ChunkingStrategyOtherChunkingStrategyResponseParam
-]
-
-
-class VectorStoreFileObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the vector store file was created."""
-
- last_error: Optional[LastError] = None
- """The last error associated with this vector store file.
-
- Will be `null` if there are no errors.
- """
-
- object: Literal["vector_store.file"]
- """The object type, which is always `vector_store.file`."""
-
- status: Literal["in_progress", "completed", "cancelled", "failed"]
- """
- The status of the vector store file, which can be either `in_progress`,
- `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
- vector store file is ready for use.
- """
-
- usage_bytes: int
- """The total vector store usage in bytes.
-
- Note that this may be different from the original file size.
- """
-
- vector_store_id: str
- """
- The ID of the [vector store](/docs/api-reference/vector-stores/object) that the
- [File](/docs/api-reference/files) is attached to.
- """
-
- attributes: Optional[Dict[str, Union[str, float, bool]]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- chunking_strategy: Optional[ChunkingStrategy] = None
- """The strategy used to chunk the file."""
diff --git a/src/digitalocean_genai_sdk/types/voice_ids_shared.py b/src/digitalocean_genai_sdk/types/voice_ids_shared.py
deleted file mode 100644
index 5679bda3..00000000
--- a/src/digitalocean_genai_sdk/types/voice_ids_shared.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["VoiceIDsShared"]
-
-VoiceIDsShared: TypeAlias = Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-]
diff --git a/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py b/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py
deleted file mode 100644
index ccbd853d..00000000
--- a/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["VoiceIDsSharedParam"]
-
-VoiceIDsSharedParam: TypeAlias = Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-]
diff --git a/src/digitalocean_genai_sdk/types/web_search_tool_call.py b/src/digitalocean_genai_sdk/types/web_search_tool_call.py
deleted file mode 100644
index 1b57ab87..00000000
--- a/src/digitalocean_genai_sdk/types/web_search_tool_call.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["WebSearchToolCall"]
-
-
-class WebSearchToolCall(BaseModel):
- id: str
- """The unique ID of the web search tool call."""
-
- status: Literal["in_progress", "searching", "completed", "failed"]
- """The status of the web search tool call."""
-
- type: Literal["web_search_call"]
- """The type of the web search tool call. Always `web_search_call`."""
diff --git a/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py b/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py
deleted file mode 100644
index 39e5c502..00000000
--- a/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["WebSearchToolCallParam"]
-
-
-class WebSearchToolCallParam(TypedDict, total=False):
- id: Required[str]
- """The unique ID of the web search tool call."""
-
- status: Required[Literal["in_progress", "searching", "completed", "failed"]]
- """The status of the web search tool call."""
-
- type: Required[Literal["web_search_call"]]
- """The type of the web search tool call. Always `web_search_call`."""
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index b065b83d..9c7ff505 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -9,12 +9,7 @@
from tests.utils import assert_matches_type
from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.chat import (
- CreateResponse,
- CompletionListResponse,
- CompletionDeleteResponse,
- CompletionListMessagesResponse,
-)
+from digitalocean_genai_sdk.types.chat import CreateResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -29,10 +24,10 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
+ model="llama3-8b-instruct",
)
assert_matches_type(CreateResponse, completion, path=["response"])
@@ -43,73 +38,25 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
messages=[
{
"content": "string",
- "role": "developer",
- "name": "name",
+ "role": "system",
}
],
- model="gpt-4o",
- audio={
- "format": "wav",
- "voice": "ash",
- },
+ model="llama3-8b-instruct",
frequency_penalty=-2,
- function_call="none",
- functions=[
- {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- }
- ],
logit_bias={"foo": 0},
logprobs=True,
- max_completion_tokens=0,
+ max_completion_tokens=256,
max_tokens=0,
metadata={"foo": "string"},
- modalities=["text"],
n=1,
- parallel_tool_calls=True,
- prediction={
- "content": "string",
- "type": "content",
- },
presence_penalty=-2,
- reasoning_effort="low",
- response_format={"type": "text"},
- seed=0,
- service_tier="auto",
stop="\n",
- store=True,
stream=True,
stream_options={"include_usage": True},
temperature=1,
- tool_choice="none",
- tools=[
- {
- "function": {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- "strict": True,
- },
- "type": "function",
- }
- ],
top_logprobs=0,
top_p=1,
user="user-1234",
- web_search_options={
- "search_context_size": "low",
- "user_location": {
- "approximate": {
- "city": "city",
- "country": "country",
- "region": "region",
- "timezone": "timezone",
- },
- "type": "approximate",
- },
- },
)
assert_matches_type(CreateResponse, completion, path=["response"])
@@ -120,10 +67,10 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
+ model="llama3-8b-instruct",
)
assert response.is_closed is True
@@ -138,89 +85,10 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.retrieve(
- "completion_id",
- )
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.retrieve(
- "completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.retrieve(
- "completion_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- client.chat.completions.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
+ model="llama3-8b-instruct",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -230,150 +98,6 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- client.chat.completions.with_raw_response.update(
- completion_id="",
- metadata={"foo": "string"},
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.list()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.list(
- after="after",
- limit=0,
- metadata={"foo": "string"},
- model="model",
- order="asc",
- )
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.delete(
- "completion_id",
- )
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.delete(
- "completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.delete(
- "completion_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- client.chat.completions.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_messages(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.list_messages(
- completion_id="completion_id",
- )
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_messages_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.list_messages(
- completion_id="completion_id",
- after="after",
- limit=0,
- order="asc",
- )
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_messages(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.list_messages(
- completion_id="completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_messages(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.list_messages(
- completion_id="completion_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list_messages(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- client.chat.completions.with_raw_response.list_messages(
- completion_id="",
- )
-
class TestAsyncCompletions:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -385,10 +109,10 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
+ model="llama3-8b-instruct",
)
assert_matches_type(CreateResponse, completion, path=["response"])
@@ -399,73 +123,25 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce
messages=[
{
"content": "string",
- "role": "developer",
- "name": "name",
+ "role": "system",
}
],
- model="gpt-4o",
- audio={
- "format": "wav",
- "voice": "ash",
- },
+ model="llama3-8b-instruct",
frequency_penalty=-2,
- function_call="none",
- functions=[
- {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- }
- ],
logit_bias={"foo": 0},
logprobs=True,
- max_completion_tokens=0,
+ max_completion_tokens=256,
max_tokens=0,
metadata={"foo": "string"},
- modalities=["text"],
n=1,
- parallel_tool_calls=True,
- prediction={
- "content": "string",
- "type": "content",
- },
presence_penalty=-2,
- reasoning_effort="low",
- response_format={"type": "text"},
- seed=0,
- service_tier="auto",
stop="\n",
- store=True,
stream=True,
stream_options={"include_usage": True},
temperature=1,
- tool_choice="none",
- tools=[
- {
- "function": {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- "strict": True,
- },
- "type": "function",
- }
- ],
top_logprobs=0,
top_p=1,
user="user-1234",
- web_search_options={
- "search_context_size": "low",
- "user_location": {
- "approximate": {
- "city": "city",
- "country": "country",
- "region": "region",
- "timezone": "timezone",
- },
- "type": "approximate",
- },
- },
)
assert_matches_type(CreateResponse, completion, path=["response"])
@@ -476,10 +152,10 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
+ model="llama3-8b-instruct",
)
assert response.is_closed is True
@@ -494,89 +170,10 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.retrieve(
- "completion_id",
- )
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.retrieve(
- "completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.retrieve(
- "completion_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- await async_client.chat.completions.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
+ model="llama3-8b-instruct",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -585,147 +182,3 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe
assert_matches_type(CreateResponse, completion, path=["response"])
assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- await async_client.chat.completions.with_raw_response.update(
- completion_id="",
- metadata={"foo": "string"},
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.list()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.list(
- after="after",
- limit=0,
- metadata={"foo": "string"},
- model="model",
- order="asc",
- )
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.delete(
- "completion_id",
- )
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.delete(
- "completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.delete(
- "completion_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- await async_client.chat.completions.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.list_messages(
- completion_id="completion_id",
- )
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_messages_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.list_messages(
- completion_id="completion_id",
- after="after",
- limit=0,
- order="asc",
- )
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.list_messages(
- completion_id="completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.list_messages(
- completion_id="completion_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- await async_client.chat.completions.with_raw_response.list_messages(
- completion_id="",
- )
diff --git a/tests/api_resources/fine_tuning/__init__.py b/tests/api_resources/fine_tuning/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/fine_tuning/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/fine_tuning/checkpoints/__init__.py b/tests/api_resources/fine_tuning/checkpoints/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/fine_tuning/checkpoints/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
deleted file mode 100644
index 1983d90a..00000000
--- a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
+++ /dev/null
@@ -1,309 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning.checkpoints import (
- PermissionDeleteResponse,
- ListFineTuningCheckpointPermission,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestPermissions:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- permission = client.fine_tuning.checkpoints.permissions.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.checkpoints.permissions.with_raw_response.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.checkpoints.permissions.with_streaming_response.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- client.fine_tuning.checkpoints.permissions.with_raw_response.create(
- permission_id="",
- project_ids=["string"],
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- permission = client.fine_tuning.checkpoints.permissions.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- permission = client.fine_tuning.checkpoints.permissions.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- order="ascending",
- project_id="project_id",
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
- permission_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- permission = client.fine_tuning.checkpoints.permissions.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- )
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = response.parse()
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = response.parse()
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncPermissions:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = await response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = await response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create(
- permission_id="",
- project_ids=["string"],
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- order="ascending",
- project_id="project_id",
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = await response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = await response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
- permission_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- )
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = await response.parse()
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = await response.parse()
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/fine_tuning/jobs/__init__.py b/tests/api_resources/fine_tuning/jobs/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/fine_tuning/jobs/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py
deleted file mode 100644
index f94416f9..00000000
--- a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning.jobs import CheckpointRetrieveResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestCheckpoints:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- checkpoint = client.fine_tuning.jobs.checkpoints.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- checkpoint = client.fine_tuning.jobs.checkpoints.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- )
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- checkpoint = response.parse()
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.checkpoints.with_streaming_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- checkpoint = response.parse()
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
- fine_tuning_job_id="",
- )
-
-
-class TestAsyncCheckpoints:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- checkpoint = await async_client.fine_tuning.jobs.checkpoints.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- checkpoint = await async_client.fine_tuning.jobs.checkpoints.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- )
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- checkpoint = await response.parse()
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.checkpoints.with_streaming_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- checkpoint = await response.parse()
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- await async_client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
- fine_tuning_job_id="",
- )
diff --git a/tests/api_resources/fine_tuning/jobs/test_events.py b/tests/api_resources/fine_tuning/jobs/test_events.py
deleted file mode 100644
index 39802767..00000000
--- a/tests/api_resources/fine_tuning/jobs/test_events.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning.jobs import EventRetrieveResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestEvents:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- event = client.fine_tuning.jobs.events.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- event = client.fine_tuning.jobs.events.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- )
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.events.with_raw_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- event = response.parse()
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.events.with_streaming_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- event = response.parse()
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- client.fine_tuning.jobs.events.with_raw_response.retrieve(
- fine_tuning_job_id="",
- )
-
-
-class TestAsyncEvents:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- event = await async_client.fine_tuning.jobs.events.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- event = await async_client.fine_tuning.jobs.events.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- )
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.events.with_raw_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- event = await response.parse()
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.events.with_streaming_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- event = await response.parse()
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- await async_client.fine_tuning.jobs.events.with_raw_response.retrieve(
- fine_tuning_job_id="",
- )
diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py
deleted file mode 100644
index f0014f09..00000000
--- a/tests/api_resources/fine_tuning/test_jobs.py
+++ /dev/null
@@ -1,437 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning import (
- FineTuningJob,
- JobListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestJobs:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- hyperparameters={
- "batch_size": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- },
- integrations=[
- {
- "type": "wandb",
- "wandb": {
- "project": "my-wandb-project",
- "entity": "entity",
- "name": "name",
- "tags": ["custom-tag"],
- },
- }
- ],
- metadata={"foo": "string"},
- method={
- "dpo": {
- "hyperparameters": {
- "batch_size": "auto",
- "beta": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- }
- },
- "supervised": {
- "hyperparameters": {
- "batch_size": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- }
- },
- "type": "supervised",
- },
- seed=42,
- suffix="x",
- validation_file="file-abc123",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.with_raw_response.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.with_streaming_response.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.with_raw_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.with_streaming_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- client.fine_tuning.jobs.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.list()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.list(
- after="after",
- limit=0,
- metadata={"foo": "string"},
- )
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.with_raw_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.with_streaming_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- client.fine_tuning.jobs.with_raw_response.cancel(
- "",
- )
-
-
-class TestAsyncJobs:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- hyperparameters={
- "batch_size": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- },
- integrations=[
- {
- "type": "wandb",
- "wandb": {
- "project": "my-wandb-project",
- "entity": "entity",
- "name": "name",
- "tags": ["custom-tag"],
- },
- }
- ],
- metadata={"foo": "string"},
- method={
- "dpo": {
- "hyperparameters": {
- "batch_size": "auto",
- "beta": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- }
- },
- "supervised": {
- "hyperparameters": {
- "batch_size": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- }
- },
- "type": "supervised",
- },
- seed=42,
- suffix="x",
- validation_file="file-abc123",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.with_raw_response.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.with_streaming_response.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.with_raw_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.with_streaming_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- await async_client.fine_tuning.jobs.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.list()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.list(
- after="after",
- limit=0,
- metadata={"foo": "string"},
- )
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.with_raw_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.with_streaming_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- await async_client.fine_tuning.jobs.with_raw_response.cancel(
- "",
- )
diff --git a/tests/api_resources/organization/__init__.py b/tests/api_resources/organization/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/organization/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/organization/projects/__init__.py b/tests/api_resources/organization/projects/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/organization/projects/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/organization/projects/test_api_keys.py b/tests/api_resources/organization/projects/test_api_keys.py
deleted file mode 100644
index d8c6bbc0..00000000
--- a/tests/api_resources/organization/projects/test_api_keys.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
- APIKey,
- APIKeyListResponse,
- APIKeyDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAPIKeys:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- api_key = client.organization.projects.api_keys.retrieve(
- key_id="key_id",
- project_id="project_id",
- )
- assert_matches_type(APIKey, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="key_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = response.parse()
- assert_matches_type(APIKey, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.api_keys.with_streaming_response.retrieve(
- key_id="key_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = response.parse()
- assert_matches_type(APIKey, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="key_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- api_key = client.organization.projects.api_keys.list(
- project_id="project_id",
- )
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- api_key = client.organization.projects.api_keys.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.api_keys.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = response.parse()
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.api_keys.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = response.parse()
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- api_key = client.organization.projects.api_keys.delete(
- key_id="key_id",
- project_id="project_id",
- )
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.api_keys.with_raw_response.delete(
- key_id="key_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = response.parse()
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.api_keys.with_streaming_response.delete(
- key_id="key_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = response.parse()
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.delete(
- key_id="key_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.delete(
- key_id="",
- project_id="project_id",
- )
-
-
-class TestAsyncAPIKeys:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- api_key = await async_client.organization.projects.api_keys.retrieve(
- key_id="key_id",
- project_id="project_id",
- )
- assert_matches_type(APIKey, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="key_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = await response.parse()
- assert_matches_type(APIKey, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.api_keys.with_streaming_response.retrieve(
- key_id="key_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = await response.parse()
- assert_matches_type(APIKey, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="key_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- api_key = await async_client.organization.projects.api_keys.list(
- project_id="project_id",
- )
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- api_key = await async_client.organization.projects.api_keys.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.api_keys.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = await response.parse()
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.api_keys.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = await response.parse()
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- api_key = await async_client.organization.projects.api_keys.delete(
- key_id="key_id",
- project_id="project_id",
- )
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.api_keys.with_raw_response.delete(
- key_id="key_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = await response.parse()
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.api_keys.with_streaming_response.delete(
- key_id="key_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = await response.parse()
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.delete(
- key_id="key_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.delete(
- key_id="",
- project_id="project_id",
- )
diff --git a/tests/api_resources/organization/projects/test_rate_limits.py b/tests/api_resources/organization/projects/test_rate_limits.py
deleted file mode 100644
index 3f7688b4..00000000
--- a/tests/api_resources/organization/projects/test_rate_limits.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
- RateLimit,
- RateLimitListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestRateLimits:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- rate_limit = client.organization.projects.rate_limits.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- )
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- rate_limit = client.organization.projects.rate_limits.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- batch_1_day_max_input_tokens=0,
- max_audio_megabytes_per_1_minute=0,
- max_images_per_1_minute=0,
- max_requests_per_1_day=0,
- max_requests_per_1_minute=0,
- max_tokens_per_1_minute=0,
- )
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- rate_limit = response.parse()
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.rate_limits.with_streaming_response.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- rate_limit = response.parse()
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="rate_limit_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"):
- client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- rate_limit = client.organization.projects.rate_limits.list(
- project_id="project_id",
- )
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- rate_limit = client.organization.projects.rate_limits.list(
- project_id="project_id",
- after="after",
- before="before",
- limit=0,
- )
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.rate_limits.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- rate_limit = response.parse()
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.rate_limits.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- rate_limit = response.parse()
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.rate_limits.with_raw_response.list(
- project_id="",
- )
-
-
-class TestAsyncRateLimits:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- rate_limit = await async_client.organization.projects.rate_limits.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- )
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- rate_limit = await async_client.organization.projects.rate_limits.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- batch_1_day_max_input_tokens=0,
- max_audio_megabytes_per_1_minute=0,
- max_images_per_1_minute=0,
- max_requests_per_1_day=0,
- max_requests_per_1_minute=0,
- max_tokens_per_1_minute=0,
- )
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- rate_limit = await response.parse()
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.rate_limits.with_streaming_response.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- rate_limit = await response.parse()
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="rate_limit_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"):
- await async_client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- rate_limit = await async_client.organization.projects.rate_limits.list(
- project_id="project_id",
- )
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- rate_limit = await async_client.organization.projects.rate_limits.list(
- project_id="project_id",
- after="after",
- before="before",
- limit=0,
- )
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.rate_limits.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- rate_limit = await response.parse()
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.rate_limits.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- rate_limit = await response.parse()
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.rate_limits.with_raw_response.list(
- project_id="",
- )
diff --git a/tests/api_resources/organization/projects/test_service_accounts.py b/tests/api_resources/organization/projects/test_service_accounts.py
deleted file mode 100644
index 4cbdbd38..00000000
--- a/tests/api_resources/organization/projects/test_service_accounts.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
- ServiceAccount,
- ServiceAccountListResponse,
- ServiceAccountCreateResponse,
- ServiceAccountDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestServiceAccounts:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.create(
- project_id="project_id",
- name="name",
- )
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.service_accounts.with_raw_response.create(
- project_id="project_id",
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = response.parse()
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.service_accounts.with_streaming_response.create(
- project_id="project_id",
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = response.parse()
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.create(
- project_id="",
- name="name",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- )
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = response.parse()
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.service_accounts.with_streaming_response.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = response.parse()
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="service_account_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.list(
- project_id="project_id",
- )
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.service_accounts.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = response.parse()
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.service_accounts.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = response.parse()
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- )
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = response.parse()
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.service_accounts.with_streaming_response.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = response.parse()
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="service_account_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="",
- project_id="project_id",
- )
-
-
-class TestAsyncServiceAccounts:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.create(
- project_id="project_id",
- name="name",
- )
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.service_accounts.with_raw_response.create(
- project_id="project_id",
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = await response.parse()
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.service_accounts.with_streaming_response.create(
- project_id="project_id",
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = await response.parse()
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.create(
- project_id="",
- name="name",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- )
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = await response.parse()
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.service_accounts.with_streaming_response.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = await response.parse()
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="service_account_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.list(
- project_id="project_id",
- )
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.service_accounts.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = await response.parse()
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.service_accounts.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = await response.parse()
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- )
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = await response.parse()
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.service_accounts.with_streaming_response.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = await response.parse()
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="service_account_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="",
- project_id="project_id",
- )
diff --git a/tests/api_resources/organization/projects/test_users.py b/tests/api_resources/organization/projects/test_users.py
deleted file mode 100644
index df2a136e..00000000
--- a/tests/api_resources/organization/projects/test_users.py
+++ /dev/null
@@ -1,552 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
- ProjectUser,
- UserListResponse,
- UserDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUsers:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.retrieve(
- user_id="user_id",
- project_id="project_id",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.retrieve(
- user_id="user_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.retrieve(
- user_id="user_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.retrieve(
- user_id="user_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.projects.users.with_raw_response.retrieve(
- user_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.update(
- user_id="user_id",
- project_id="",
- role="owner",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.projects.users.with_raw_response.update(
- user_id="",
- project_id="project_id",
- role="owner",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.list(
- project_id="project_id",
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.delete(
- user_id="user_id",
- project_id="project_id",
- )
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.delete(
- user_id="user_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.delete(
- user_id="user_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.delete(
- user_id="user_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.projects.users.with_raw_response.delete(
- user_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_add(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_add(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_add(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.add(
- project_id="",
- role="owner",
- user_id="user_id",
- )
-
-
-class TestAsyncUsers:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.retrieve(
- user_id="user_id",
- project_id="project_id",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.retrieve(
- user_id="user_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.retrieve(
- user_id="user_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.retrieve(
- user_id="user_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.retrieve(
- user_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.update(
- user_id="user_id",
- project_id="",
- role="owner",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.update(
- user_id="",
- project_id="project_id",
- role="owner",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.list(
- project_id="project_id",
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.delete(
- user_id="user_id",
- project_id="project_id",
- )
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.delete(
- user_id="user_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.delete(
- user_id="user_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.delete(
- user_id="user_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.delete(
- user_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.add(
- project_id="",
- role="owner",
- user_id="user_id",
- )
diff --git a/tests/api_resources/organization/test_admin_api_keys.py b/tests/api_resources/organization/test_admin_api_keys.py
deleted file mode 100644
index 0e0949a1..00000000
--- a/tests/api_resources/organization/test_admin_api_keys.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
- AdminAPIKey,
- AdminAPIKeyListResponse,
- AdminAPIKeyDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAdminAPIKeys:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.create(
- name="New Admin Key",
- )
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.admin_api_keys.with_raw_response.create(
- name="New Admin Key",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.admin_api_keys.with_streaming_response.create(
- name="New Admin Key",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.retrieve(
- "key_id",
- )
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.admin_api_keys.with_raw_response.retrieve(
- "key_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.admin_api_keys.with_streaming_response.retrieve(
- "key_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- client.organization.admin_api_keys.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.list()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.list(
- after="after",
- limit=0,
- order="asc",
- )
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.admin_api_keys.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.admin_api_keys.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.delete(
- "key_id",
- )
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.admin_api_keys.with_raw_response.delete(
- "key_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.admin_api_keys.with_streaming_response.delete(
- "key_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- client.organization.admin_api_keys.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncAdminAPIKeys:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.create(
- name="New Admin Key",
- )
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.admin_api_keys.with_raw_response.create(
- name="New Admin Key",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.admin_api_keys.with_streaming_response.create(
- name="New Admin Key",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.retrieve(
- "key_id",
- )
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.admin_api_keys.with_raw_response.retrieve(
- "key_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.admin_api_keys.with_streaming_response.retrieve(
- "key_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- await async_client.organization.admin_api_keys.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.list()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.list(
- after="after",
- limit=0,
- order="asc",
- )
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.admin_api_keys.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.admin_api_keys.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.delete(
- "key_id",
- )
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.admin_api_keys.with_raw_response.delete(
- "key_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.admin_api_keys.with_streaming_response.delete(
- "key_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- await async_client.organization.admin_api_keys.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/organization/test_invites.py b/tests/api_resources/organization/test_invites.py
deleted file mode 100644
index 73528d26..00000000
--- a/tests/api_resources/organization/test_invites.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
- Invite,
- InviteListResponse,
- InviteDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestInvites:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.create(
- email="email",
- role="reader",
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.create(
- email="email",
- role="reader",
- projects=[
- {
- "id": "id",
- "role": "member",
- }
- ],
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.invites.with_raw_response.create(
- email="email",
- role="reader",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.invites.with_streaming_response.create(
- email="email",
- role="reader",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.retrieve(
- "invite_id",
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.invites.with_raw_response.retrieve(
- "invite_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.invites.with_streaming_response.retrieve(
- "invite_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
- client.organization.invites.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.list()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.list(
- after="after",
- limit=0,
- )
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.invites.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = response.parse()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.invites.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = response.parse()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.delete(
- "invite_id",
- )
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.invites.with_raw_response.delete(
- "invite_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = response.parse()
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.invites.with_streaming_response.delete(
- "invite_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = response.parse()
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
- client.organization.invites.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncInvites:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.create(
- email="email",
- role="reader",
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.create(
- email="email",
- role="reader",
- projects=[
- {
- "id": "id",
- "role": "member",
- }
- ],
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.invites.with_raw_response.create(
- email="email",
- role="reader",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = await response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.invites.with_streaming_response.create(
- email="email",
- role="reader",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = await response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.retrieve(
- "invite_id",
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.invites.with_raw_response.retrieve(
- "invite_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = await response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.invites.with_streaming_response.retrieve(
- "invite_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = await response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
- await async_client.organization.invites.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.list()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.list(
- after="after",
- limit=0,
- )
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.invites.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = await response.parse()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.invites.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = await response.parse()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.delete(
- "invite_id",
- )
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.invites.with_raw_response.delete(
- "invite_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = await response.parse()
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.invites.with_streaming_response.delete(
- "invite_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = await response.parse()
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
- await async_client.organization.invites.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/organization/test_projects.py b/tests/api_resources/organization/test_projects.py
deleted file mode 100644
index 6b9dd9a4..00000000
--- a/tests/api_resources/organization/test_projects.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
- Project,
- ProjectListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestProjects:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.create(
- name="name",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.create(
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.create(
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.retrieve(
- "project_id",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.retrieve(
- "project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.retrieve(
- "project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.update(
- project_id="project_id",
- name="name",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.update(
- project_id="project_id",
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.update(
- project_id="project_id",
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.with_raw_response.update(
- project_id="",
- name="name",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.list()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.list(
- after="after",
- include_archived=True,
- limit=0,
- )
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_archive(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.archive(
- "project_id",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_archive(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.archive(
- "project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_archive(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.archive(
- "project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_archive(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.with_raw_response.archive(
- "",
- )
-
-
-class TestAsyncProjects:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.create(
- name="name",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.create(
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.create(
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.retrieve(
- "project_id",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.retrieve(
- "project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.retrieve(
- "project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.update(
- project_id="project_id",
- name="name",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.update(
- project_id="project_id",
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.update(
- project_id="project_id",
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.with_raw_response.update(
- project_id="",
- name="name",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.list()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.list(
- after="after",
- include_archived=True,
- limit=0,
- )
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.archive(
- "project_id",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.archive(
- "project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.archive(
- "project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.with_raw_response.archive(
- "",
- )
diff --git a/tests/api_resources/organization/test_usage.py b/tests/api_resources/organization/test_usage.py
deleted file mode 100644
index 198f2159..00000000
--- a/tests/api_resources/organization/test_usage.py
+++ /dev/null
@@ -1,834 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import UsageResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUsage:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.audio_speeches(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_audio_speeches_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.audio_speeches(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.audio_speeches(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.audio_speeches(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.audio_transcriptions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_audio_transcriptions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.audio_transcriptions(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.audio_transcriptions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.audio_transcriptions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.code_interpreter_sessions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_code_interpreter_sessions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.code_interpreter_sessions(
- start_time=0,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.code_interpreter_sessions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.code_interpreter_sessions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_completions(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.completions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_completions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.completions(
- start_time=0,
- api_key_ids=["string"],
- batch=True,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_completions(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.completions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_completions(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.completions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_embeddings(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.embeddings(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_embeddings_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.embeddings(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_embeddings(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.embeddings(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_embeddings(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.embeddings(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_images(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.images(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_images_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.images(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- sizes=["256x256"],
- sources=["image.generation"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_images(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.images(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_images(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.images(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_moderations(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.moderations(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_moderations_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.moderations(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_moderations(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.moderations(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_moderations(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.moderations(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_vector_stores(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.vector_stores(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_vector_stores_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.vector_stores(
- start_time=0,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_vector_stores(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.vector_stores(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_vector_stores(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.vector_stores(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncUsage:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.audio_speeches(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_audio_speeches_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.audio_speeches(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.audio_speeches(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.audio_speeches(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.audio_transcriptions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_audio_transcriptions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.audio_transcriptions(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.audio_transcriptions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.audio_transcriptions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.code_interpreter_sessions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_code_interpreter_sessions_with_all_params(
- self, async_client: AsyncDigitaloceanGenaiSDK
- ) -> None:
- usage = await async_client.organization.usage.code_interpreter_sessions(
- start_time=0,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.code_interpreter_sessions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.code_interpreter_sessions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.completions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_completions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.completions(
- start_time=0,
- api_key_ids=["string"],
- batch=True,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.completions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.completions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.embeddings(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_embeddings_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.embeddings(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.embeddings(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.embeddings(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.images(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_images_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.images(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- sizes=["256x256"],
- sources=["image.generation"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.images(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.images(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.moderations(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_moderations_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.moderations(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.moderations(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.moderations(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.vector_stores(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_vector_stores_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.vector_stores(
- start_time=0,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.vector_stores(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.vector_stores(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/organization/test_users.py b/tests/api_resources/organization/test_users.py
deleted file mode 100644
index b40fcbef..00000000
--- a/tests/api_resources/organization/test_users.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
- OrganizationUser,
- UserListResponse,
- UserDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUsers:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.retrieve(
- "user_id",
- )
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.users.with_raw_response.retrieve(
- "user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.users.with_streaming_response.retrieve(
- "user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.users.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.update(
- user_id="user_id",
- role="owner",
- )
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.users.with_raw_response.update(
- user_id="user_id",
- role="owner",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.users.with_streaming_response.update(
- user_id="user_id",
- role="owner",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.users.with_raw_response.update(
- user_id="",
- role="owner",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.list()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.list(
- after="after",
- emails=["string"],
- limit=0,
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.users.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.users.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.delete(
- "user_id",
- )
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.users.with_raw_response.delete(
- "user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.users.with_streaming_response.delete(
- "user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.users.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncUsers:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.retrieve(
- "user_id",
- )
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.users.with_raw_response.retrieve(
- "user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.users.with_streaming_response.retrieve(
- "user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.users.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.update(
- user_id="user_id",
- role="owner",
- )
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.users.with_raw_response.update(
- user_id="user_id",
- role="owner",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.users.with_streaming_response.update(
- user_id="user_id",
- role="owner",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.users.with_raw_response.update(
- user_id="",
- role="owner",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.list()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.list(
- after="after",
- emails=["string"],
- limit=0,
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.users.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.users.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.delete(
- "user_id",
- )
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.users.with_raw_response.delete(
- "user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.users.with_streaming_response.delete(
- "user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.users.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/test_assistants.py b/tests/api_resources/test_assistants.py
deleted file mode 100644
index a5fa998d..00000000
--- a/tests/api_resources/test_assistants.py
+++ /dev/null
@@ -1,528 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- AssistantObject,
- AssistantListResponse,
- AssistantDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAssistants:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.create(
- model="gpt-4o",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.create(
- model="gpt-4o",
- description="description",
- instructions="instructions",
- metadata={"foo": "string"},
- name="name",
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.create(
- model="gpt-4o",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.create(
- model="gpt-4o",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.retrieve(
- "assistant_id",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.retrieve(
- "assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.retrieve(
- "assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.assistants.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.update(
- assistant_id="assistant_id",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.update(
- assistant_id="assistant_id",
- description="description",
- instructions="instructions",
- metadata={"foo": "string"},
- model="string",
- name="name",
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.update(
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.update(
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.assistants.with_raw_response.update(
- assistant_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.list()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.list(
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.delete(
- "assistant_id",
- )
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.delete(
- "assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.delete(
- "assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.assistants.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncAssistants:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.create(
- model="gpt-4o",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.create(
- model="gpt-4o",
- description="description",
- instructions="instructions",
- metadata={"foo": "string"},
- name="name",
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.create(
- model="gpt-4o",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.create(
- model="gpt-4o",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.retrieve(
- "assistant_id",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.retrieve(
- "assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.retrieve(
- "assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.assistants.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.update(
- assistant_id="assistant_id",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.update(
- assistant_id="assistant_id",
- description="description",
- instructions="instructions",
- metadata={"foo": "string"},
- model="string",
- name="name",
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.update(
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.update(
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.assistants.with_raw_response.update(
- assistant_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.list()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.list(
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.delete(
- "assistant_id",
- )
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.delete(
- "assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.delete(
- "assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.assistants.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/test_audio.py b/tests/api_resources/test_audio.py
deleted file mode 100644
index e71d568e..00000000
--- a/tests/api_resources/test_audio.py
+++ /dev/null
@@ -1,383 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import httpx
-import pytest
-from respx import MockRouter
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- AudioTranslateAudioResponse,
- AudioTranscribeAudioResponse,
-)
-from digitalocean_genai_sdk._response import (
- BinaryAPIResponse,
- AsyncBinaryAPIResponse,
- StreamedBinaryAPIResponse,
- AsyncStreamedBinaryAPIResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAudio:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_method_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- audio = client.audio.generate_speech(
- input="input",
- model="string",
- voice="ash",
- )
- assert audio.is_closed
- assert audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, BinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_method_generate_speech_with_all_params(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- audio = client.audio.generate_speech(
- input="input",
- model="string",
- voice="ash",
- instructions="instructions",
- response_format="mp3",
- speed=0.25,
- )
- assert audio.is_closed
- assert audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, BinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_raw_response_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
-
- audio = client.audio.with_raw_response.generate_speech(
- input="input",
- model="string",
- voice="ash",
- )
-
- assert audio.is_closed is True
- assert audio.http_request.headers.get("X-Stainless-Lang") == "python"
- assert audio.json() == {"foo": "bar"}
- assert isinstance(audio, BinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_streaming_response_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- with client.audio.with_streaming_response.generate_speech(
- input="input",
- model="string",
- voice="ash",
- ) as audio:
- assert not audio.is_closed
- assert audio.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assert audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, StreamedBinaryAPIResponse)
-
- assert cast(Any, audio.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None:
- audio = client.audio.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- )
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_transcribe_audio_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- audio = client.audio.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- include=["logprobs"],
- language="language",
- prompt="prompt",
- response_format="json",
- stream=True,
- temperature=0,
- timestamp_granularities=["word"],
- )
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.audio.with_raw_response.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- audio = response.parse()
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None:
- with client.audio.with_streaming_response.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- audio = response.parse()
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_translate_audio(self, client: DigitaloceanGenaiSDK) -> None:
- audio = client.audio.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- )
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_translate_audio_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- audio = client.audio.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- prompt="prompt",
- response_format="json",
- temperature=0,
- )
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_translate_audio(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.audio.with_raw_response.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- audio = response.parse()
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_translate_audio(self, client: DigitaloceanGenaiSDK) -> None:
- with client.audio.with_streaming_response.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- audio = response.parse()
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncAudio:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_method_generate_speech(
- self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter
- ) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- audio = await async_client.audio.generate_speech(
- input="input",
- model="string",
- voice="ash",
- )
- assert audio.is_closed
- assert await audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, AsyncBinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_method_generate_speech_with_all_params(
- self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter
- ) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- audio = await async_client.audio.generate_speech(
- input="input",
- model="string",
- voice="ash",
- instructions="instructions",
- response_format="mp3",
- speed=0.25,
- )
- assert audio.is_closed
- assert await audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, AsyncBinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_raw_response_generate_speech(
- self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter
- ) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
-
- audio = await async_client.audio.with_raw_response.generate_speech(
- input="input",
- model="string",
- voice="ash",
- )
-
- assert audio.is_closed is True
- assert audio.http_request.headers.get("X-Stainless-Lang") == "python"
- assert await audio.json() == {"foo": "bar"}
- assert isinstance(audio, AsyncBinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_streaming_response_generate_speech(
- self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter
- ) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- async with async_client.audio.with_streaming_response.generate_speech(
- input="input",
- model="string",
- voice="ash",
- ) as audio:
- assert not audio.is_closed
- assert audio.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assert await audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, AsyncStreamedBinaryAPIResponse)
-
- assert cast(Any, audio.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- audio = await async_client.audio.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- )
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_transcribe_audio_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- audio = await async_client.audio.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- include=["logprobs"],
- language="language",
- prompt="prompt",
- response_format="json",
- stream=True,
- temperature=0,
- timestamp_granularities=["word"],
- )
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.audio.with_raw_response.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- audio = await response.parse()
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.audio.with_streaming_response.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- audio = await response.parse()
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- audio = await async_client.audio.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- )
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_translate_audio_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- audio = await async_client.audio.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- prompt="prompt",
- response_format="json",
- temperature=0,
- )
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.audio.with_raw_response.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- audio = await response.parse()
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.audio.with_streaming_response.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- audio = await response.parse()
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py
deleted file mode 100644
index 6ad0bbee..00000000
--- a/tests/api_resources/test_batches.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import Batch, BatchListResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestBatches:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.batches.with_raw_response.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.batches.with_streaming_response.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.retrieve(
- "batch_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.batches.with_raw_response.retrieve(
- "batch_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.batches.with_streaming_response.retrieve(
- "batch_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.batches.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.list()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.list(
- after="after",
- limit=0,
- )
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.batches.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = response.parse()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.batches.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = response.parse()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.cancel(
- "batch_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.batches.with_raw_response.cancel(
- "batch_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.batches.with_streaming_response.cancel(
- "batch_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.batches.with_raw_response.cancel(
- "",
- )
-
-
-class TestAsyncBatches:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.batches.with_raw_response.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.batches.with_streaming_response.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.retrieve(
- "batch_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.batches.with_raw_response.retrieve(
- "batch_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.batches.with_streaming_response.retrieve(
- "batch_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.batches.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.list()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.list(
- after="after",
- limit=0,
- )
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.batches.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = await response.parse()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.batches.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = await response.parse()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.cancel(
- "batch_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.batches.with_raw_response.cancel(
- "batch_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.batches.with_streaming_response.cancel(
- "batch_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.batches.with_raw_response.cancel(
- "",
- )
diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py
deleted file mode 100644
index eb5c1abd..00000000
--- a/tests/api_resources/test_completions.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- CompletionCreateResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestCompletions:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.completions.create(
- model="string",
- prompt="This is a test.",
- )
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.completions.create(
- model="string",
- prompt="This is a test.",
- best_of=0,
- echo=True,
- frequency_penalty=-2,
- logit_bias={"foo": 0},
- logprobs=0,
- max_tokens=16,
- n=1,
- presence_penalty=-2,
- seed=0,
- stop="\n",
- stream=True,
- stream_options={"include_usage": True},
- suffix="test.",
- temperature=1,
- top_p=1,
- user="user-1234",
- )
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.completions.with_raw_response.create(
- model="string",
- prompt="This is a test.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.completions.with_streaming_response.create(
- model="string",
- prompt="This is a test.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncCompletions:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.completions.create(
- model="string",
- prompt="This is a test.",
- )
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.completions.create(
- model="string",
- prompt="This is a test.",
- best_of=0,
- echo=True,
- frequency_penalty=-2,
- logit_bias={"foo": 0},
- logprobs=0,
- max_tokens=16,
- n=1,
- presence_penalty=-2,
- seed=0,
- stop="\n",
- stream=True,
- stream_options={"include_usage": True},
- suffix="test.",
- temperature=1,
- top_p=1,
- user="user-1234",
- )
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.completions.with_raw_response.create(
- model="string",
- prompt="This is a test.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.completions.with_streaming_response.create(
- model="string",
- prompt="This is a test.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py
index bd3ef322..ea1b5879 100644
--- a/tests/api_resources/test_embeddings.py
+++ b/tests/api_resources/test_embeddings.py
@@ -32,8 +32,6 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
embedding = client.embeddings.create(
input="The quick brown fox jumped over the lazy dog",
model="text-embedding-3-small",
- dimensions=1,
- encoding_format="float",
user="user-1234",
)
assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
@@ -85,8 +83,6 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce
embedding = await async_client.embeddings.create(
input="The quick brown fox jumped over the lazy dog",
model="text-embedding-3-small",
- dimensions=1,
- encoding_format="float",
user="user-1234",
)
assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py
deleted file mode 100644
index b30ae859..00000000
--- a/tests/api_resources/test_files.py
+++ /dev/null
@@ -1,430 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- OpenAIFile,
- FileListResponse,
- FileDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFiles:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.retrieve(
- "file_id",
- )
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.retrieve(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.retrieve(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.files.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.list()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.list(
- after="after",
- limit=0,
- order="asc",
- purpose="purpose",
- )
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.delete(
- "file_id",
- )
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.delete(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.delete(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.files.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.retrieve_content(
- "file_id",
- )
- assert_matches_type(str, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.retrieve_content(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(str, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.retrieve_content(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(str, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.files.with_raw_response.retrieve_content(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_upload(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.upload(
- file=b"raw file contents",
- purpose="assistants",
- )
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_upload(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.upload(
- file=b"raw file contents",
- purpose="assistants",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_upload(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.upload(
- file=b"raw file contents",
- purpose="assistants",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncFiles:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.retrieve(
- "file_id",
- )
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.retrieve(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.retrieve(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.files.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.list()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.list(
- after="after",
- limit=0,
- order="asc",
- purpose="purpose",
- )
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.delete(
- "file_id",
- )
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.delete(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.delete(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.files.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.retrieve_content(
- "file_id",
- )
- assert_matches_type(str, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.retrieve_content(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(str, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.retrieve_content(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(str, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.files.with_raw_response.retrieve_content(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.upload(
- file=b"raw file contents",
- purpose="assistants",
- )
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.upload(
- file=b"raw file contents",
- purpose="assistants",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.upload(
- file=b"raw file contents",
- purpose="assistants",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py
deleted file mode 100644
index 380a0759..00000000
--- a/tests/api_resources/test_images.py
+++ /dev/null
@@ -1,320 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- ImagesResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestImages:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_edit(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_edit_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- mask=b"raw file contents",
- model="dall-e-2",
- n=1,
- response_format="url",
- size="1024x1024",
- user="user-1234",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_edit(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.images.with_raw_response.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_edit(self, client: DigitaloceanGenaiSDK) -> None:
- with client.images.with_streaming_response.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_generation(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_generation(
- prompt="A cute baby sea otter",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_generation_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_generation(
- prompt="A cute baby sea otter",
- model="dall-e-3",
- n=1,
- quality="standard",
- response_format="url",
- size="1024x1024",
- style="vivid",
- user="user-1234",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_generation(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.images.with_raw_response.create_generation(
- prompt="A cute baby sea otter",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_generation(self, client: DigitaloceanGenaiSDK) -> None:
- with client.images.with_streaming_response.create_generation(
- prompt="A cute baby sea otter",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_variation(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_variation(
- image=b"raw file contents",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_variation_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_variation(
- image=b"raw file contents",
- model="dall-e-2",
- n=1,
- response_format="url",
- size="1024x1024",
- user="user-1234",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_variation(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.images.with_raw_response.create_variation(
- image=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_variation(self, client: DigitaloceanGenaiSDK) -> None:
- with client.images.with_streaming_response.create_variation(
- image=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncImages:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_edit_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- mask=b"raw file contents",
- model="dall-e-2",
- n=1,
- response_format="url",
- size="1024x1024",
- user="user-1234",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.images.with_raw_response.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.images.with_streaming_response.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_generation(
- prompt="A cute baby sea otter",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_generation_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_generation(
- prompt="A cute baby sea otter",
- model="dall-e-3",
- n=1,
- quality="standard",
- response_format="url",
- size="1024x1024",
- style="vivid",
- user="user-1234",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.images.with_raw_response.create_generation(
- prompt="A cute baby sea otter",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.images.with_streaming_response.create_generation(
- prompt="A cute baby sea otter",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_variation(
- image=b"raw file contents",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_variation_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_variation(
- image=b"raw file contents",
- model="dall-e-2",
- n=1,
- response_format="url",
- size="1024x1024",
- user="user-1234",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.images.with_raw_response.create_variation(
- image=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.images.with_streaming_response.create_variation(
- image=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index aa215415..1148affb 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import Model, ModelListResponse, ModelDeleteResponse
+from digitalocean_genai_sdk.types import Model, ModelListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -21,7 +21,7 @@ class TestModels:
@parametrize
def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
model = client.models.retrieve(
- "gpt-4o-mini",
+ "llama3-8b-instruct",
)
assert_matches_type(Model, model, path=["response"])
@@ -29,7 +29,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
@parametrize
def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
response = client.models.with_raw_response.retrieve(
- "gpt-4o-mini",
+ "llama3-8b-instruct",
)
assert response.is_closed is True
@@ -41,7 +41,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
@parametrize
def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
with client.models.with_streaming_response.retrieve(
- "gpt-4o-mini",
+ "llama3-8b-instruct",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -87,48 +87,6 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- model = client.models.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- )
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.models.with_raw_response.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = response.parse()
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.models.with_streaming_response.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = response.parse()
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- client.models.with_raw_response.delete(
- "",
- )
-
class TestAsyncModels:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -137,7 +95,7 @@ class TestAsyncModels:
@parametrize
async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
model = await async_client.models.retrieve(
- "gpt-4o-mini",
+ "llama3-8b-instruct",
)
assert_matches_type(Model, model, path=["response"])
@@ -145,7 +103,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) ->
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
response = await async_client.models.with_raw_response.retrieve(
- "gpt-4o-mini",
+ "llama3-8b-instruct",
)
assert response.is_closed is True
@@ -157,7 +115,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
async with async_client.models.with_streaming_response.retrieve(
- "gpt-4o-mini",
+ "llama3-8b-instruct",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -202,45 +160,3 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena
assert_matches_type(ModelListResponse, model, path=["response"])
assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- model = await async_client.models.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- )
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.models.with_raw_response.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = await response.parse()
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.models.with_streaming_response.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = await response.parse()
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- await async_client.models.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py
deleted file mode 100644
index 79d34625..00000000
--- a/tests/api_resources/test_moderations.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import ModerationClassifyResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestModerations:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_classify(self, client: DigitaloceanGenaiSDK) -> None:
- moderation = client.moderations.classify(
- input="I want to kill them.",
- )
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_classify_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- moderation = client.moderations.classify(
- input="I want to kill them.",
- model="omni-moderation-2024-09-26",
- )
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_classify(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.moderations.with_raw_response.classify(
- input="I want to kill them.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- moderation = response.parse()
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_classify(self, client: DigitaloceanGenaiSDK) -> None:
- with client.moderations.with_streaming_response.classify(
- input="I want to kill them.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- moderation = response.parse()
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncModerations:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- moderation = await async_client.moderations.classify(
- input="I want to kill them.",
- )
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_classify_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- moderation = await async_client.moderations.classify(
- input="I want to kill them.",
- model="omni-moderation-2024-09-26",
- )
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.moderations.with_raw_response.classify(
- input="I want to kill them.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- moderation = await response.parse()
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.moderations.with_streaming_response.classify(
- input="I want to kill them.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- moderation = await response.parse()
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_organization.py b/tests/api_resources/test_organization.py
deleted file mode 100644
index 844ed287..00000000
--- a/tests/api_resources/test_organization.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- UsageResponse,
- OrganizationListAuditLogsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestOrganization:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
- organization = client.organization.get_costs(
- start_time=0,
- )
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_get_costs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- organization = client.organization.get_costs(
- start_time=0,
- bucket_width="1d",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.with_raw_response.get_costs(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- organization = response.parse()
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.with_streaming_response.get_costs(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- organization = response.parse()
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
- organization = client.organization.list_audit_logs()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_audit_logs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- organization = client.organization.list_audit_logs(
- actor_emails=["string"],
- actor_ids=["string"],
- after="after",
- before="before",
- effective_at={
- "gt": 0,
- "gte": 0,
- "lt": 0,
- "lte": 0,
- },
- event_types=["api_key.created"],
- limit=0,
- project_ids=["string"],
- resource_ids=["string"],
- )
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.with_raw_response.list_audit_logs()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- organization = response.parse()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.with_streaming_response.list_audit_logs() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- organization = response.parse()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncOrganization:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- organization = await async_client.organization.get_costs(
- start_time=0,
- )
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_get_costs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- organization = await async_client.organization.get_costs(
- start_time=0,
- bucket_width="1d",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.with_raw_response.get_costs(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- organization = await response.parse()
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.with_streaming_response.get_costs(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- organization = await response.parse()
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- organization = await async_client.organization.list_audit_logs()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_audit_logs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- organization = await async_client.organization.list_audit_logs(
- actor_emails=["string"],
- actor_ids=["string"],
- after="after",
- before="before",
- effective_at={
- "gt": 0,
- "gte": 0,
- "lt": 0,
- "lte": 0,
- },
- event_types=["api_key.created"],
- limit=0,
- project_ids=["string"],
- resource_ids=["string"],
- )
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.with_raw_response.list_audit_logs()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- organization = await response.parse()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.with_streaming_response.list_audit_logs() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- organization = await response.parse()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_realtime.py b/tests/api_resources/test_realtime.py
deleted file mode 100644
index 15797ff9..00000000
--- a/tests/api_resources/test_realtime.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- RealtimeCreateSessionResponse,
- RealtimeCreateTranscriptionSessionResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestRealtime:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_session(self, client: DigitaloceanGenaiSDK) -> None:
- realtime = client.realtime.create_session()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_session_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- realtime = client.realtime.create_session(
- input_audio_format="pcm16",
- input_audio_noise_reduction={"type": "near_field"},
- input_audio_transcription={
- "language": "language",
- "model": "model",
- "prompt": "prompt",
- },
- instructions="instructions",
- max_response_output_tokens=0,
- modalities=["text"],
- model="gpt-4o-realtime-preview",
- output_audio_format="pcm16",
- temperature=0,
- tool_choice="tool_choice",
- tools=[
- {
- "description": "description",
- "name": "name",
- "parameters": {},
- "type": "function",
- }
- ],
- turn_detection={
- "create_response": True,
- "eagerness": "low",
- "interrupt_response": True,
- "prefix_padding_ms": 0,
- "silence_duration_ms": 0,
- "threshold": 0,
- "type": "server_vad",
- },
- voice="ash",
- )
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_session(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.realtime.with_raw_response.create_session()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- realtime = response.parse()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_session(self, client: DigitaloceanGenaiSDK) -> None:
- with client.realtime.with_streaming_response.create_session() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- realtime = response.parse()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
- realtime = client.realtime.create_transcription_session()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_transcription_session_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- realtime = client.realtime.create_transcription_session(
- include=["string"],
- input_audio_format="pcm16",
- input_audio_noise_reduction={"type": "near_field"},
- input_audio_transcription={
- "language": "language",
- "model": "gpt-4o-transcribe",
- "prompt": "prompt",
- },
- modalities=["text"],
- turn_detection={
- "create_response": True,
- "eagerness": "low",
- "interrupt_response": True,
- "prefix_padding_ms": 0,
- "silence_duration_ms": 0,
- "threshold": 0,
- "type": "server_vad",
- },
- )
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.realtime.with_raw_response.create_transcription_session()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- realtime = response.parse()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
- with client.realtime.with_streaming_response.create_transcription_session() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- realtime = response.parse()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncRealtime:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- realtime = await async_client.realtime.create_session()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_session_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- realtime = await async_client.realtime.create_session(
- input_audio_format="pcm16",
- input_audio_noise_reduction={"type": "near_field"},
- input_audio_transcription={
- "language": "language",
- "model": "model",
- "prompt": "prompt",
- },
- instructions="instructions",
- max_response_output_tokens=0,
- modalities=["text"],
- model="gpt-4o-realtime-preview",
- output_audio_format="pcm16",
- temperature=0,
- tool_choice="tool_choice",
- tools=[
- {
- "description": "description",
- "name": "name",
- "parameters": {},
- "type": "function",
- }
- ],
- turn_detection={
- "create_response": True,
- "eagerness": "low",
- "interrupt_response": True,
- "prefix_padding_ms": 0,
- "silence_duration_ms": 0,
- "threshold": 0,
- "type": "server_vad",
- },
- voice="ash",
- )
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.realtime.with_raw_response.create_session()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- realtime = await response.parse()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.realtime.with_streaming_response.create_session() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- realtime = await response.parse()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_transcription_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- realtime = await async_client.realtime.create_transcription_session()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_transcription_session_with_all_params(
- self, async_client: AsyncDigitaloceanGenaiSDK
- ) -> None:
- realtime = await async_client.realtime.create_transcription_session(
- include=["string"],
- input_audio_format="pcm16",
- input_audio_noise_reduction={"type": "near_field"},
- input_audio_transcription={
- "language": "language",
- "model": "gpt-4o-transcribe",
- "prompt": "prompt",
- },
- modalities=["text"],
- turn_detection={
- "create_response": True,
- "eagerness": "low",
- "interrupt_response": True,
- "prefix_padding_ms": 0,
- "silence_duration_ms": 0,
- "threshold": 0,
- "type": "server_vad",
- },
- )
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_transcription_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.realtime.with_raw_response.create_transcription_session()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- realtime = await response.parse()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_transcription_session(
- self, async_client: AsyncDigitaloceanGenaiSDK
- ) -> None:
- async with async_client.realtime.with_streaming_response.create_transcription_session() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- realtime = await response.parse()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
deleted file mode 100644
index 4bd7e367..00000000
--- a/tests/api_resources/test_responses.py
+++ /dev/null
@@ -1,479 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- Response,
- ResponseListInputItemsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestResponses:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.create(
- input="string",
- model="gpt-4o",
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.create(
- input="string",
- model="gpt-4o",
- include=["file_search_call.results"],
- instructions="instructions",
- max_output_tokens=0,
- metadata={"foo": "string"},
- parallel_tool_calls=True,
- previous_response_id="previous_response_id",
- reasoning={
- "effort": "low",
- "generate_summary": "concise",
- },
- store=True,
- stream=True,
- temperature=1,
- text={"format": {"type": "text"}},
- tool_choice="none",
- tools=[
- {
- "type": "file_search",
- "vector_store_ids": ["string"],
- "filters": {
- "key": "key",
- "type": "eq",
- "value": "string",
- },
- "max_num_results": 0,
- "ranking_options": {
- "ranker": "auto",
- "score_threshold": 0,
- },
- }
- ],
- top_p=1,
- truncation="auto",
- user="user-1234",
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- http_response = client.responses.with_raw_response.create(
- input="string",
- model="gpt-4o",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.responses.with_streaming_response.create(
- input="string",
- model="gpt-4o",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- include=["file_search_call.results"],
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- http_response = client.responses.with_raw_response.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.responses.with_streaming_response.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- client.responses.with_raw_response.retrieve(
- response_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- )
- assert response is None
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- http_response = client.responses.with_raw_response.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert response is None
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.responses.with_streaming_response.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = http_response.parse()
- assert response is None
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- client.responses.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.list_input_items(
- response_id="response_id",
- )
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_input_items_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.list_input_items(
- response_id="response_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
- http_response = client.responses.with_raw_response.list_input_items(
- response_id="response_id",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
- with client.responses.with_streaming_response.list_input_items(
- response_id="response_id",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = http_response.parse()
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- client.responses.with_raw_response.list_input_items(
- response_id="",
- )
-
-
-class TestAsyncResponses:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.create(
- input="string",
- model="gpt-4o",
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.create(
- input="string",
- model="gpt-4o",
- include=["file_search_call.results"],
- instructions="instructions",
- max_output_tokens=0,
- metadata={"foo": "string"},
- parallel_tool_calls=True,
- previous_response_id="previous_response_id",
- reasoning={
- "effort": "low",
- "generate_summary": "concise",
- },
- store=True,
- stream=True,
- temperature=1,
- text={"format": {"type": "text"}},
- tool_choice="none",
- tools=[
- {
- "type": "file_search",
- "vector_store_ids": ["string"],
- "filters": {
- "key": "key",
- "type": "eq",
- "value": "string",
- },
- "max_num_results": 0,
- "ranking_options": {
- "ranker": "auto",
- "score_threshold": 0,
- },
- }
- ],
- top_p=1,
- truncation="auto",
- user="user-1234",
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- http_response = await async_client.responses.with_raw_response.create(
- input="string",
- model="gpt-4o",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.responses.with_streaming_response.create(
- input="string",
- model="gpt-4o",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = await http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- include=["file_search_call.results"],
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- http_response = await async_client.responses.with_raw_response.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.responses.with_streaming_response.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = await http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- await async_client.responses.with_raw_response.retrieve(
- response_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- )
- assert response is None
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- http_response = await async_client.responses.with_raw_response.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert response is None
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.responses.with_streaming_response.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = await http_response.parse()
- assert response is None
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- await async_client.responses.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.list_input_items(
- response_id="response_id",
- )
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_input_items_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.list_input_items(
- response_id="response_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- http_response = await async_client.responses.with_raw_response.list_input_items(
- response_id="response_id",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.responses.with_streaming_response.list_input_items(
- response_id="response_id",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = await http_response.parse()
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- await async_client.responses.with_raw_response.list_input_items(
- response_id="",
- )
diff --git a/tests/api_resources/test_threads.py b/tests/api_resources/test_threads.py
deleted file mode 100644
index cca5e067..00000000
--- a/tests/api_resources/test_threads.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import ThreadObject, ThreadDeleteResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestThreads:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.create()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.create(
- messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.retrieve(
- "thread_id",
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.with_raw_response.retrieve(
- "thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.with_streaming_response.retrieve(
- "thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.update(
- thread_id="thread_id",
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.update(
- thread_id="thread_id",
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.with_raw_response.update(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.with_streaming_response.update(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.with_raw_response.update(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.delete(
- "thread_id",
- )
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.with_raw_response.delete(
- "thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.with_streaming_response.delete(
- "thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = response.parse()
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncThreads:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.create()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.create(
- messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.retrieve(
- "thread_id",
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.with_raw_response.retrieve(
- "thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.with_streaming_response.retrieve(
- "thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.update(
- thread_id="thread_id",
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.update(
- thread_id="thread_id",
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.with_raw_response.update(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.with_streaming_response.update(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.with_raw_response.update(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.delete(
- "thread_id",
- )
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.with_raw_response.delete(
- "thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.with_streaming_response.delete(
- "thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = await response.parse()
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py
deleted file mode 100644
index 35f52730..00000000
--- a/tests/api_resources/test_uploads.py
+++ /dev/null
@@ -1,399 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- Upload,
- UploadAddPartResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUploads:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.uploads.with_raw_response.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.uploads.with_streaming_response.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_add_part(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- )
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_add_part(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.uploads.with_raw_response.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = response.parse()
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_add_part(self, client: DigitaloceanGenaiSDK) -> None:
- with client.uploads.with_streaming_response.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = response.parse()
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_add_part(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- client.uploads.with_raw_response.add_part(
- upload_id="",
- data=b"raw file contents",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.cancel(
- "upload_abc123",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.uploads.with_raw_response.cancel(
- "upload_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.uploads.with_streaming_response.cancel(
- "upload_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- client.uploads.with_raw_response.cancel(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_complete(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_complete_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- md5="md5",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_complete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.uploads.with_raw_response.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_complete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.uploads.with_streaming_response.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_complete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- client.uploads.with_raw_response.complete(
- upload_id="",
- part_ids=["string"],
- )
-
-
-class TestAsyncUploads:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.uploads.with_raw_response.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.uploads.with_streaming_response.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- )
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.uploads.with_raw_response.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = await response.parse()
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.uploads.with_streaming_response.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = await response.parse()
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- await async_client.uploads.with_raw_response.add_part(
- upload_id="",
- data=b"raw file contents",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.cancel(
- "upload_abc123",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.uploads.with_raw_response.cancel(
- "upload_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.uploads.with_streaming_response.cancel(
- "upload_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- await async_client.uploads.with_raw_response.cancel(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_complete_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- md5="md5",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.uploads.with_raw_response.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.uploads.with_streaming_response.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- await async_client.uploads.with_raw_response.complete(
- upload_id="",
- part_ids=["string"],
- )
diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py
deleted file mode 100644
index 1c8b5fb0..00000000
--- a/tests/api_resources/test_vector_stores.py
+++ /dev/null
@@ -1,603 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- VectorStoreObject,
- VectorStoreListResponse,
- VectorStoreDeleteResponse,
- VectorStoreSearchResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestVectorStores:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.create()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.create(
- chunking_strategy={"type": "auto"},
- expires_after={
- "anchor": "last_active_at",
- "days": 1,
- },
- file_ids=["string"],
- metadata={"foo": "string"},
- name="name",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.retrieve(
- "vector_store_id",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.retrieve(
- "vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.retrieve(
- "vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.update(
- vector_store_id="vector_store_id",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.update(
- vector_store_id="vector_store_id",
- expires_after={
- "anchor": "last_active_at",
- "days": 1,
- },
- metadata={"foo": "string"},
- name="name",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.update(
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.update(
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.with_raw_response.update(
- vector_store_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.list()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.list(
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.delete(
- "vector_store_id",
- )
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.delete(
- "vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.delete(
- "vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_search(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.search(
- vector_store_id="vs_abc123",
- query="string",
- )
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_search_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.search(
- vector_store_id="vs_abc123",
- query="string",
- filters={
- "key": "key",
- "type": "eq",
- "value": "string",
- },
- max_num_results=1,
- ranking_options={
- "ranker": "auto",
- "score_threshold": 0,
- },
- rewrite_query=True,
- )
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_search(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.search(
- vector_store_id="vs_abc123",
- query="string",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_search(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.search(
- vector_store_id="vs_abc123",
- query="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_search(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.with_raw_response.search(
- vector_store_id="",
- query="string",
- )
-
-
-class TestAsyncVectorStores:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.create()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.create(
- chunking_strategy={"type": "auto"},
- expires_after={
- "anchor": "last_active_at",
- "days": 1,
- },
- file_ids=["string"],
- metadata={"foo": "string"},
- name="name",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.retrieve(
- "vector_store_id",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.retrieve(
- "vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.retrieve(
- "vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.update(
- vector_store_id="vector_store_id",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.update(
- vector_store_id="vector_store_id",
- expires_after={
- "anchor": "last_active_at",
- "days": 1,
- },
- metadata={"foo": "string"},
- name="name",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.update(
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.update(
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.with_raw_response.update(
- vector_store_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.list()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.list(
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.delete(
- "vector_store_id",
- )
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.delete(
- "vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.delete(
- "vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.search(
- vector_store_id="vs_abc123",
- query="string",
- )
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_search_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.search(
- vector_store_id="vs_abc123",
- query="string",
- filters={
- "key": "key",
- "type": "eq",
- "value": "string",
- },
- max_num_results=1,
- ranking_options={
- "ranker": "auto",
- "score_threshold": 0,
- },
- rewrite_query=True,
- )
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.search(
- vector_store_id="vs_abc123",
- query="string",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.search(
- vector_store_id="vs_abc123",
- query="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.with_raw_response.search(
- vector_store_id="",
- query="string",
- )
diff --git a/tests/api_resources/threads/__init__.py b/tests/api_resources/threads/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/threads/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/threads/runs/__init__.py b/tests/api_resources/threads/runs/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/threads/runs/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/threads/runs/test_steps.py b/tests/api_resources/threads/runs/test_steps.py
deleted file mode 100644
index e972e952..00000000
--- a/tests/api_resources/threads/runs/test_steps.py
+++ /dev/null
@@ -1,307 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.threads.runs import (
- RunStepObject,
- StepListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestSteps:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- step = client.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- )
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- step = client.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- )
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = response.parse()
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.steps.with_streaming_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = response.parse()
- assert_matches_type(RunStepObject, step, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="",
- run_id="run_id",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
- client.threads.runs.steps.with_raw_response.retrieve(
- step_id="",
- thread_id="thread_id",
- run_id="run_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- step = client.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- step = client.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- after="after",
- before="before",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- limit=0,
- order="asc",
- )
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.steps.with_raw_response.list(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = response.parse()
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.steps.with_streaming_response.list(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = response.parse()
- assert_matches_type(StepListResponse, step, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.steps.with_raw_response.list(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.steps.with_raw_response.list(
- run_id="",
- thread_id="thread_id",
- )
-
-
-class TestAsyncSteps:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- step = await async_client.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- )
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- step = await async_client.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- )
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = await response.parse()
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.steps.with_streaming_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = await response.parse()
- assert_matches_type(RunStepObject, step, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="",
- run_id="run_id",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.retrieve(
- step_id="",
- thread_id="thread_id",
- run_id="run_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- step = await async_client.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- step = await async_client.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- after="after",
- before="before",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- limit=0,
- order="asc",
- )
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.steps.with_raw_response.list(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = await response.parse()
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.steps.with_streaming_response.list(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = await response.parse()
- assert_matches_type(StepListResponse, step, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.list(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.list(
- run_id="",
- thread_id="thread_id",
- )
diff --git a/tests/api_resources/threads/test_messages.py b/tests/api_resources/threads/test_messages.py
deleted file mode 100644
index e1aaf51e..00000000
--- a/tests/api_resources/threads/test_messages.py
+++ /dev/null
@@ -1,602 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.threads import (
- MessageObject,
- MessageListResponse,
- MessageDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestMessages:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.create(
- thread_id="thread_id",
- content="string",
- role="user",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.create(
- thread_id="thread_id",
- content="string",
- role="user",
- attachments=[
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- metadata={"foo": "string"},
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.create(
- thread_id="thread_id",
- content="string",
- role="user",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.create(
- thread_id="thread_id",
- content="string",
- role="user",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.create(
- thread_id="",
- content="string",
- role="user",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.retrieve(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.threads.messages.with_raw_response.retrieve(
- message_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.update(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.update(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.update(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.threads.messages.with_raw_response.update(
- message_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.list(
- thread_id="thread_id",
- )
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.list(
- thread_id="thread_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- run_id="run_id",
- )
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.list(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.list(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.list(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.delete(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.delete(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.delete(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.delete(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.threads.messages.with_raw_response.delete(
- message_id="",
- thread_id="thread_id",
- )
-
-
-class TestAsyncMessages:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.create(
- thread_id="thread_id",
- content="string",
- role="user",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.create(
- thread_id="thread_id",
- content="string",
- role="user",
- attachments=[
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- metadata={"foo": "string"},
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.create(
- thread_id="thread_id",
- content="string",
- role="user",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.create(
- thread_id="thread_id",
- content="string",
- role="user",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.create(
- thread_id="",
- content="string",
- role="user",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.retrieve(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.threads.messages.with_raw_response.retrieve(
- message_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.update(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.update(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.update(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.threads.messages.with_raw_response.update(
- message_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.list(
- thread_id="thread_id",
- )
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.list(
- thread_id="thread_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- run_id="run_id",
- )
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.list(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.list(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.list(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.delete(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.delete(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.delete(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.delete(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.threads.messages.with_raw_response.delete(
- message_id="",
- thread_id="thread_id",
- )
diff --git a/tests/api_resources/threads/test_runs.py b/tests/api_resources/threads/test_runs.py
deleted file mode 100644
index 59716b5e..00000000
--- a/tests/api_resources/threads/test_runs.py
+++ /dev/null
@@ -1,967 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.threads import (
- RunObject,
- RunListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestRuns:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.create(
- assistant_id="assistant_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.create(
- assistant_id="assistant_id",
- instructions="instructions",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="gpt-4o",
- parallel_tool_calls=True,
- response_format="auto",
- stream=True,
- temperature=1,
- thread={
- "messages": [
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- "metadata": {"foo": "string"},
- "tool_resources": {
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- },
- tool_choice="none",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.create(
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.create(
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.retrieve(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.with_raw_response.retrieve(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.update(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.update(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.update(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.with_raw_response.update(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.list(
- thread_id="thread_id",
- )
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.list(
- thread_id="thread_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.list(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.list(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunListResponse, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.list(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.cancel(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.cancel(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.cancel(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.cancel(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.with_raw_response.cancel(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_run(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_run_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- additional_instructions="additional_instructions",
- additional_messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- instructions="instructions",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="gpt-4o",
- parallel_tool_calls=True,
- reasoning_effort="low",
- response_format="auto",
- stream=True,
- temperature=1,
- tool_choice="none",
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_run(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_run(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create_run(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.create_run(
- thread_id="",
- assistant_id="assistant_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_submit_tool_outputs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[
- {
- "output": "output",
- "tool_call_id": "tool_call_id",
- }
- ],
- stream=True,
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="",
- tool_outputs=[{}],
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
-
-
-class TestAsyncRuns:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.create(
- assistant_id="assistant_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.create(
- assistant_id="assistant_id",
- instructions="instructions",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="gpt-4o",
- parallel_tool_calls=True,
- response_format="auto",
- stream=True,
- temperature=1,
- thread={
- "messages": [
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- "metadata": {"foo": "string"},
- "tool_resources": {
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- },
- tool_choice="none",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.create(
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.create(
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.retrieve(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.with_raw_response.retrieve(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.update(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.update(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.update(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.with_raw_response.update(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.list(
- thread_id="thread_id",
- )
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.list(
- thread_id="thread_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.list(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.list(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunListResponse, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.list(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.cancel(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.cancel(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.cancel(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.cancel(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.with_raw_response.cancel(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_run_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- additional_instructions="additional_instructions",
- additional_messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- instructions="instructions",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="gpt-4o",
- parallel_tool_calls=True,
- reasoning_effort="low",
- response_format="auto",
- stream=True,
- temperature=1,
- tool_choice="none",
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.create_run(
- thread_id="",
- assistant_id="assistant_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_submit_tool_outputs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[
- {
- "output": "output",
- "tool_call_id": "tool_call_id",
- }
- ],
- stream=True,
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="",
- tool_outputs=[{}],
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
diff --git a/tests/api_resources/vector_stores/__init__.py b/tests/api_resources/vector_stores/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/vector_stores/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/vector_stores/test_file_batches.py b/tests/api_resources/vector_stores/test_file_batches.py
deleted file mode 100644
index 47897412..00000000
--- a/tests/api_resources/vector_stores/test_file_batches.py
+++ /dev/null
@@ -1,479 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.vector_stores import (
- VectorStoreFileBatchObject,
- ListVectorStoreFilesResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFileBatches:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- attributes={"foo": "string"},
- chunking_strategy={"type": "auto"},
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.file_batches.with_raw_response.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.file_batches.with_streaming_response.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.create(
- vector_store_id="",
- file_ids=["string"],
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.file_batches.with_streaming_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="",
- vector_store_id="vs_abc123",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.file_batches.with_streaming_response.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="batch_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="",
- vector_store_id="vector_store_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_files(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_files_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="in_progress",
- limit=0,
- order="asc",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_files(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_files(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.file_batches.with_streaming_response.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list_files(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="batch_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="",
- vector_store_id="vector_store_id",
- )
-
-
-class TestAsyncFileBatches:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- attributes={"foo": "string"},
- chunking_strategy={"type": "auto"},
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.create(
- vector_store_id="",
- file_ids=["string"],
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="",
- vector_store_id="vs_abc123",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="batch_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="",
- vector_store_id="vector_store_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_files_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="in_progress",
- limit=0,
- order="asc",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = await response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = await response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="batch_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="",
- vector_store_id="vector_store_id",
- )
diff --git a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py
deleted file mode 100644
index b93fe1b4..00000000
--- a/tests/api_resources/vector_stores/test_files.py
+++ /dev/null
@@ -1,677 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.vector_stores import (
- FileDeleteResponse,
- VectorStoreFileObject,
- FileRetrieveContentResponse,
- ListVectorStoreFilesResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFiles:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- attributes={"foo": "string"},
- chunking_strategy={"type": "auto"},
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.create(
- vector_store_id="",
- file_id="file_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.retrieve(
- file_id="file-abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.vector_stores.files.with_raw_response.retrieve(
- file_id="",
- vector_store_id="vs_abc123",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.update(
- file_id="file-abc123",
- vector_store_id="",
- attributes={"foo": "string"},
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.vector_stores.files.with_raw_response.update(
- file_id="",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.list(
- vector_store_id="vector_store_id",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.list(
- vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="in_progress",
- limit=0,
- order="asc",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.list(
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.list(
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.list(
- vector_store_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.delete(
- file_id="file_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.vector_stores.files.with_raw_response.delete(
- file_id="",
- vector_store_id="vector_store_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="",
- vector_store_id="vs_abc123",
- )
-
-
-class TestAsyncFiles:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- attributes={"foo": "string"},
- chunking_strategy={"type": "auto"},
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.create(
- vector_store_id="",
- file_id="file_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.retrieve(
- file_id="file-abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.retrieve(
- file_id="",
- vector_store_id="vs_abc123",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.update(
- file_id="file-abc123",
- vector_store_id="",
- attributes={"foo": "string"},
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.update(
- file_id="",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.list(
- vector_store_id="vector_store_id",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.list(
- vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="in_progress",
- limit=0,
- order="asc",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.list(
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.list(
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.list(
- vector_store_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.delete(
- file_id="file_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.delete(
- file_id="",
- vector_store_id="vector_store_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="",
- vector_store_id="vs_abc123",
- )
diff --git a/tests/test_client.py b/tests/test_client.py
index c13403e3..7ac3aae1 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -23,20 +23,17 @@
from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK, APIResponseValidationError
from digitalocean_genai_sdk._types import Omit
+from digitalocean_genai_sdk._utils import maybe_transform
from digitalocean_genai_sdk._models import BaseModel, FinalRequestOptions
from digitalocean_genai_sdk._constants import RAW_RESPONSE_HEADER
-from digitalocean_genai_sdk._exceptions import (
- APIStatusError,
- APITimeoutError,
- DigitaloceanGenaiSDKError,
- APIResponseValidationError,
-)
+from digitalocean_genai_sdk._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
from digitalocean_genai_sdk._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
BaseClient,
make_request_options,
)
+from digitalocean_genai_sdk.types.chat.completion_create_params import CompletionCreateParams
from .utils import update_env
@@ -339,16 +336,6 @@ def test_default_headers_option(self) -> None:
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
- def test_validate_headers(self) -> None:
- client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
- assert request.headers.get("Authorization") == f"Bearer {api_key}"
-
- with pytest.raises(DigitaloceanGenaiSDKError):
- with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}):
- client2 = DigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True)
- _ = client2
-
def test_default_query_option(self) -> None:
client = DigitaloceanGenaiSDK(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
@@ -727,20 +714,58 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
@mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.get("/assistants").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- self.client.get("/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}})
+ self.client.post(
+ "/chat/completions",
+ body=cast(
+ object,
+ maybe_transform(
+ dict(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ),
+ CompletionCreateParams,
+ ),
+ ),
+ cast_to=httpx.Response,
+ options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
+ )
assert _get_open_connections(self.client) == 0
@mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.get("/assistants").mock(return_value=httpx.Response(500))
+ respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- self.client.get("/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}})
+ self.client.post(
+ "/chat/completions",
+ body=cast(
+ object,
+ maybe_transform(
+ dict(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ),
+ CompletionCreateParams,
+ ),
+ ),
+ cast_to=httpx.Response,
+ options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
+ )
assert _get_open_connections(self.client) == 0
@@ -768,9 +793,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = client.assistants.with_raw_response.list()
+ response = client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@@ -792,9 +825,18 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
-
- response = client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()})
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
+
+ response = client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ extra_headers={"x-stainless-retry-count": Omit()},
+ )
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@@ -815,12 +857,48 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
-
- response = client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"})
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
+
+ response = client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ extra_headers={"x-stainless-retry-count": "42"},
+ )
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
+ @pytest.mark.respx(base_url=base_url)
+ def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+ # Test that the default follow_redirects=True allows following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+ respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+ response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ assert response.status_code == 200
+ assert response.json() == {"status": "ok"}
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+ # Test that follow_redirects=False prevents following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+
+ with pytest.raises(APIStatusError) as exc_info:
+ self.client.post(
+ "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
+ )
+
+ assert exc_info.value.response.status_code == 302
+ assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
+
class TestAsyncDigitaloceanGenaiSDK:
client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
@@ -1101,16 +1179,6 @@ def test_default_headers_option(self) -> None:
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
- def test_validate_headers(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
- assert request.headers.get("Authorization") == f"Bearer {api_key}"
-
- with pytest.raises(DigitaloceanGenaiSDKError):
- with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}):
- client2 = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True)
- _ = client2
-
def test_default_query_option(self) -> None:
client = AsyncDigitaloceanGenaiSDK(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
@@ -1493,11 +1561,28 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
@mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.get("/assistants").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- await self.client.get(
- "/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}
+ await self.client.post(
+ "/chat/completions",
+ body=cast(
+ object,
+ maybe_transform(
+ dict(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ),
+ CompletionCreateParams,
+ ),
+ ),
+ cast_to=httpx.Response,
+ options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
)
assert _get_open_connections(self.client) == 0
@@ -1505,11 +1590,28 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter)
@mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.get("/assistants").mock(return_value=httpx.Response(500))
+ respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- await self.client.get(
- "/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}
+ await self.client.post(
+ "/chat/completions",
+ body=cast(
+ object,
+ maybe_transform(
+ dict(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ),
+ CompletionCreateParams,
+ ),
+ ),
+ cast_to=httpx.Response,
+ options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
)
assert _get_open_connections(self.client) == 0
@@ -1539,9 +1641,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = await client.assistants.with_raw_response.list()
+ response = await client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@@ -1564,9 +1674,18 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
-
- response = await client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()})
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
+
+ response = await client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ extra_headers={"x-stainless-retry-count": Omit()},
+ )
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@@ -1588,9 +1707,18 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
-
- response = await client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"})
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
+
+ response = await client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ extra_headers={"x-stainless-retry-count": "42"},
+ )
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
@@ -1638,3 +1766,30 @@ async def test_main() -> None:
raise AssertionError("calling get_platform using asyncify resulted in a hung process")
time.sleep(0.1)
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+ # Test that the default follow_redirects=True allows following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+ respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+ response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ assert response.status_code == 200
+ assert response.json() == {"status": "ok"}
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+ # Test that follow_redirects=False prevents following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+
+ with pytest.raises(APIStatusError) as exc_info:
+ await self.client.post(
+ "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
+ )
+
+ assert exc_info.value.response.status_code == 302
+ assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"